Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/TS-4994-3.0
commit 6455c0980f
@@ -7,8 +7,4 @@ java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -data
 java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
 ```
 
-如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path
-
-请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/lib 来指定寻找共享库的路径。
-
 If you encounter the error Exception in thread "main" `java.lang.UnsatisfiedLinkError: no taos in java.library.path`, please check whether the TDengine client package is installed or TDengine is compiled and installed. If you are sure it is installed and still encounter this error, you can add `-Djava.library.path=/usr/lib` after the `java` command to specify the path to the shared library.
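As a concrete sketch of the workaround described above (the host and database names here are placeholders for illustration, not values taken from this commit):

```
java -Djava.library.path=/usr/lib -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host localhost -database test
```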
@@ -194,10 +194,10 @@ extern int32_t tsMinIntervalTime;
 extern int32_t tsMaxInsertBatchRows;
 
 // build info
-extern char version[];
-extern char compatible_version[];
-extern char gitinfo[];
-extern char buildinfo[];
+extern char td_version[];
+extern char td_compatible_version[];
+extern char td_gitinfo[];
+extern char td_buildinfo[];
 
 // lossy
 extern char tsLossyColumns[];
@@ -70,7 +70,8 @@ typedef struct SActiveCheckpointInfo SActiveCheckpointInfo;
 #define SSTREAM_TASK_NEED_CONVERT_VER 2
 #define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3
 
-extern int32_t streamMetaId;
+extern int32_t streamMetaRefPool;
+extern int32_t streamTaskRefPool;
 
 enum {
   STREAM_STATUS__NORMAL = 0,
@@ -258,6 +259,7 @@ typedef struct STaskId {
 typedef struct SStreamTaskId {
   int64_t streamId;
   int32_t taskId;
+  int64_t refId;
   const char* idStr;
 } SStreamTaskId;
 
@@ -291,7 +293,6 @@ typedef struct SStreamStatus {
   int8_t schedStatus;
   int8_t statusBackup;
   int32_t schedIdleTime;  // idle time before invoke again
-  int32_t timerActive;    // timer is active
   int64_t lastExecTs;     // last exec time stamp
   int32_t inScanHistorySentinel;
   bool appendTranstateBlock;  // has append the transfer state data block already
@@ -454,7 +455,6 @@ struct SStreamTask {
 
   // the followings attributes don't be serialized
   SScanhistorySchedInfo schedHistoryInfo;
-  int32_t refCnt;
   int32_t transferStateAlignCnt;
   struct SStreamMeta* pMeta;
   SSHashObj* pNameMap;
@@ -546,7 +546,7 @@ typedef int32_t (*__state_trans_user_fn)(SStreamTask*, void* param);
 
 int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam,
                        SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** pTask);
-void tFreeStreamTask(SStreamTask* pTask);
+void tFreeStreamTask(void* pTask);
 int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
 int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
 int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver);
@@ -664,6 +664,8 @@ void streamTaskResetStatus(SStreamTask* pTask);
 void streamTaskSetStatusReady(SStreamTask* pTask);
 ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask);
 const char* streamTaskGetExecType(int32_t type);
+int32_t streamTaskAllocRefId(SStreamTask* pTask, int64_t** pRefId);
+void streamTaskFreeRefId(int64_t* pRefId);
 
 bool streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
 void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
@@ -752,16 +754,15 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa
 int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
 int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
 int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
+int32_t streamMetaAcquireTaskUnsafe(SStreamMeta* pMeta, STaskId* pId, SStreamTask** pTask);
 int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
 void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
-int32_t streamMetaAcquireOneTask(SStreamTask* pTask);
 void streamMetaClear(SStreamMeta* pMeta);
 void streamMetaInitBackend(SStreamMeta* pMeta);
 int32_t streamMetaCommit(SStreamMeta* pMeta);
 int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
 void streamMetaNotifyClose(SStreamMeta* pMeta);
 void streamMetaStartHb(SStreamMeta* pMeta);
-bool streamMetaTaskInTimer(SStreamMeta* pMeta);
 int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
                                       int64_t endTs, bool ready);
 int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo);
 
@@ -20,11 +20,11 @@
 extern "C" {
 #endif
 
-extern char version[];
-extern char compatible_version[];
-extern char gitinfo[];
-extern char gitinfoOfInternal[];
-extern char buildinfo[];
+extern char td_version[];
+extern char td_compatible_version[];
+extern char td_gitinfo[];
+extern char td_gitinfoOfInternal[];
+extern char td_buildinfo[];
 
 #ifdef __cplusplus
 }
@@ -375,7 +375,7 @@ int32_t openTransporter(const char *user, const char *auth, int32_t numOfThread,
   rpcInit.startReadTimer = 1;
   rpcInit.readTimeout = tsReadTimeout;
 
-  int32_t code = taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
+  int32_t code = taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);
   if (TSDB_CODE_SUCCESS != code) {
     tscError("invalid version string.");
     return code;
@@ -1700,7 +1700,7 @@ static int32_t buildConnectMsg(SRequestObj* pRequest, SMsgSendInfo** pMsgSendInf
   tstrncpy(connectReq.app, appInfo.appName, sizeof(connectReq.app));
   tstrncpy(connectReq.user, pObj->user, sizeof(connectReq.user));
   tstrncpy(connectReq.passwd, pObj->pass, sizeof(connectReq.passwd));
-  tstrncpy(connectReq.sVer, version, sizeof(connectReq.sVer));
+  tstrncpy(connectReq.sVer, td_version, sizeof(connectReq.sVer));
 
   int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
   void* pReq = taosMemoryMalloc(contLen);
@@ -1770,19 +1770,15 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg,
   }
 }
 
-int32_t doProcessMsgFromServer(void* param) {
-  AsyncArg* arg = (AsyncArg*)param;
-  SRpcMsg* pMsg = &arg->msg;
-  SEpSet* pEpSet = arg->pEpset;
+int32_t doProcessMsgFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) {
 
   SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
   if (pMsg->info.ahandle == NULL) {
     tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL");
-    taosMemoryFree(arg->pEpset);
     rpcFreeCont(pMsg->pCont);
-    taosMemoryFree(arg);
+    taosMemoryFree(pEpSet);
     return TSDB_CODE_TSC_INTERNAL_ERROR;
   }
 
   STscObj* pTscObj = NULL;
 
   STraceId* trace = &pMsg->info.traceId;
@@ -1802,10 +1798,9 @@ int32_t doProcessMsgFromServer(void* param) {
     if (TSDB_CODE_SUCCESS != taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId)) {
       tscError("doProcessMsgFromServer taosReleaseRef failed");
     }
-    taosMemoryFree(arg->pEpset);
     rpcFreeCont(pMsg->pCont);
+    taosMemoryFree(pEpSet);
     destroySendMsgInfo(pSendInfo);
-    taosMemoryFree(arg);
     return TSDB_CODE_TSC_INTERNAL_ERROR;
   }
   pTscObj = pRequest->pTscObj;
@@ -1844,20 +1839,24 @@ int32_t doProcessMsgFromServer(void* param) {
 
   rpcFreeCont(pMsg->pCont);
   destroySendMsgInfo(pSendInfo);
 
-  taosMemoryFree(arg);
   return TSDB_CODE_SUCCESS;
 }
 
+int32_t doProcessMsgFromServer(void* param) {
+  AsyncArg* arg = (AsyncArg*)param;
+  int32_t code = doProcessMsgFromServerImpl(&arg->msg, arg->pEpset);
+  taosMemoryFree(arg);
+  return code;
+}
+
 void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
+  int32_t code = 0;
   SEpSet* tEpSet = NULL;
   if (pEpSet != NULL) {
     tEpSet = taosMemoryCalloc(1, sizeof(SEpSet));
     if (NULL == tEpSet) {
-      pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
-      rpcFreeCont(pMsg->pCont);
-      destroySendMsgInfo(pMsg->info.ahandle);
-      return;
+      code = terrno;
+      pMsg->code = terrno;
+      goto _exit;
     }
     (void)memcpy((void*)tEpSet, (void*)pEpSet, sizeof(SEpSet));
   }
@@ -1879,21 +1878,25 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
 
   AsyncArg* arg = taosMemoryCalloc(1, sizeof(AsyncArg));
   if (NULL == arg) {
-    pMsg->code = TSDB_CODE_OUT_OF_MEMORY;
-    taosMemoryFree(tEpSet);
-    rpcFreeCont(pMsg->pCont);
-    destroySendMsgInfo(pMsg->info.ahandle);
-    return;
+    code = terrno;
+    pMsg->code = code;
+    goto _exit;
   }
 
   arg->msg = *pMsg;
   arg->pEpset = tEpSet;
 
-  if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) {
-    tscError("failed to sched msg to tsc, tsc ready to quit");
-    rpcFreeCont(pMsg->pCont);
-    taosMemoryFree(arg->pEpset);
-    destroySendMsgInfo(pMsg->info.ahandle);
+  if ((code = taosAsyncExec(doProcessMsgFromServer, arg, NULL)) != 0) {
+    pMsg->code = code;
     taosMemoryFree(arg);
+    goto _exit;
+  }
+  return;
+
+_exit:
+  tscError("failed to sched msg to tsc since %s", tstrerror(code));
+  code = doProcessMsgFromServerImpl(pMsg, tEpSet);
+  if (code != 0) {
+    tscError("failed to sched msg to tsc, tsc ready quit");
   }
 }
 
@@ -2573,7 +2576,7 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de
   rpcInit.connLimitNum = connLimitNum;
   rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
   rpcInit.readTimeout = tsReadTimeout;
-  if (TSDB_CODE_SUCCESS != taosVersionStrToInt(version, &(rpcInit.compatibilityVer))) {
+  if (TSDB_CODE_SUCCESS != taosVersionStrToInt(td_version, &rpcInit.compatibilityVer)) {
     tscError("faild to convert taos version from str to int, errcode:%s", terrstr());
     goto _OVER;
   }
 
@@ -669,7 +669,7 @@ const char *taos_data_type(int type) {
   }
 }
 
-const char *taos_get_client_info() { return version; }
+const char *taos_get_client_info() { return td_version; }
 
 // return int32_t
 int taos_affected_rows(TAOS_RES *res) {
@@ -2159,4 +2159,4 @@ int taos_set_conn_mode(TAOS *taos, int mode, int value) {
   return 0;
 }
 
-char *getBuildInfo() { return buildinfo; }
+char *getBuildInfo() { return td_buildinfo; }
@@ -80,8 +80,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
     goto End;
   }
 
-  if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) {
-    tscError("version not compatible. client version: %s, server version: %s", version, connectRsp.sVer);
+  if ((code = taosCheckVersionCompatibleFromStr(td_version, connectRsp.sVer, 3)) != 0) {
+    tscError("version not compatible. client version: %s, server version: %s", td_version, connectRsp.sVer);
     goto End;
   }
 
@@ -675,10 +675,10 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) {
   TAOS_CHECK_RETURN(cfgAddString(pCfg, "os version", info.version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
   TAOS_CHECK_RETURN(cfgAddString(pCfg, "os machine", info.machine, CFG_SCOPE_BOTH, CFG_DYN_NONE));
 
-  TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
-  TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
-  TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
-  TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
+  TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", td_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
+  TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", td_compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE));
+  TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", td_gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
+  TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", td_buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE));
   TAOS_RETURN(TSDB_CODE_SUCCESS);
 }
 
@@ -1646,6 +1646,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
   TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "checkpointInterval");
   tsStreamCheckpointInterval = pItem->i32;
 
+  TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "concurrentCheckpoint");
+  tsMaxConcurrentCheckpoint = pItem->i32;
+
   TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "streamSinkDataRate");
   tsSinkDataRate = pItem->fval;
 
@@ -221,10 +221,9 @@ int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t
   }
   TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), NULL, _exit);
 
-  TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), NULL, _exit);
-  TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), NULL, _exit);
-  TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), NULL, _exit);
+  TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), NULL, _exit);
+  TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), NULL, _exit);
+  TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), NULL, _exit);
 
   TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashSig", signum), NULL, _exit);
   TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashTs", taosGetTimestampUs()), NULL, _exit);
 
@@ -297,12 +297,13 @@ static void dmPrintArgs(int32_t argc, char const *argv[]) {
 static void dmGenerateGrant() { mndGenerateMachineCode(); }
 
 static void dmPrintVersion() {
-  printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, version, compatible_version);
-  printf("git: %s\n", gitinfo);
+  printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, td_version,
+         td_compatible_version);
+  printf("git: %s\n", td_gitinfo);
 #ifdef TD_ENTERPRISE
-  printf("gitOfInternal: %s\n", gitinfoOfInternal);
+  printf("gitOfInternal: %s\n", td_gitinfoOfInternal);
 #endif
-  printf("build: %s\n", buildinfo);
+  printf("build: %s\n", td_buildinfo);
 }
 
 static void dmPrintHelp() {
@@ -138,9 +138,9 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
          pRpc->info.handle, pRpc->contLen, pRpc->code, pRpc->info.ahandle, pRpc->info.refId);
 
   int32_t svrVer = 0;
-  code = taosVersionStrToInt(version, &svrVer);
+  code = taosVersionStrToInt(td_version, &svrVer);
   if (code != 0) {
-    dError("failed to convert version string:%s to int, code:%d", version, code);
+    dError("failed to convert version string:%s to int, code:%d", td_version, code);
     goto _OVER;
   }
   if ((code = taosCheckVersionCompatible(pRpc->info.cliVer, svrVer, 3)) != 0) {
@@ -434,8 +434,8 @@ int32_t dmInitClient(SDnode *pDnode) {
   rpcInit.startReadTimer = 1;
   rpcInit.readTimeout = tsReadTimeout;
 
-  if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
-    dError("failed to convert version string:%s to int", version);
+  if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
+    dError("failed to convert version string:%s to int", td_version);
   }
 
   pTrans->clientRpc = rpcOpen(&rpcInit);
@@ -483,8 +483,8 @@ int32_t dmInitStatusClient(SDnode *pDnode) {
   rpcInit.startReadTimer = 0;
   rpcInit.readTimeout = 0;
 
-  if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
-    dError("failed to convert version string:%s to int", version);
+  if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
+    dError("failed to convert version string:%s to int", td_version);
   }
 
   pTrans->statusRpc = rpcOpen(&rpcInit);
@@ -533,8 +533,8 @@ int32_t dmInitSyncClient(SDnode *pDnode) {
   rpcInit.startReadTimer = 1;
   rpcInit.readTimeout = tsReadTimeout;
 
-  if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
-    dError("failed to convert version string:%s to int", version);
+  if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
+    dError("failed to convert version string:%s to int", td_version);
   }
 
   pTrans->syncRpc = rpcOpen(&rpcInit);
@@ -588,8 +588,8 @@ int32_t dmInitServer(SDnode *pDnode) {
   rpcInit.compressSize = tsCompressMsgSize;
   rpcInit.shareConnLimit = tsShareConnLimit * 16;
 
-  if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) {
-    dError("failed to convert version string:%s to int", version);
+  if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) {
+    dError("failed to convert version string:%s to int", td_version);
   }
 
   pTrans->serverRpc = rpcOpen(&rpcInit);
@@ -54,7 +54,7 @@ void TestClient::DoInit() {
   rpcInit.parent = this;
   // rpcInit.secret = (char*)secretEncrypt;
   // rpcInit.spi = 1;
-  taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
+  taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);
 
   clientRpc = rpcOpen(&rpcInit);
   ASSERT(clientRpc);
@@ -1021,7 +1021,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
   }
 
   // cluster info
-  tstrncpy(pClusterInfo->version, version, sizeof(pClusterInfo->version));
+  tstrncpy(pClusterInfo->version, td_version, sizeof(pClusterInfo->version));
   pClusterInfo->monitor_interval = tsMonitorInterval;
   pClusterInfo->connections_total = mndGetNumOfConnections(pMnode);
   pClusterInfo->dbs_total = sdbGetSize(pSdb, SDB_DB);
 
@@ -239,8 +239,8 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
     goto _OVER;
   }
 
-  if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, version, 3)) != 0) {
-    mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, version);
+  if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, td_version, 3)) != 0) {
+    mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, td_version);
     goto _OVER;
   }
 
@@ -308,9 +308,9 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
   tstrncpy(connectRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
   connectRsp.whiteListVer = pUser->ipWhiteListVer;
 
-  (void)strcpy(connectRsp.sVer, version);
-  (void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", version,
-                 buildinfo, gitinfo);
+  tstrncpy(connectRsp.sVer, td_version, sizeof(connectRsp.sVer));
+  (void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", td_version,
+                 td_buildinfo, td_gitinfo);
   mndGetMnodeEpSet(pMnode, &connectRsp.epSet);
 
   int32_t contLen = tSerializeSConnectRsp(NULL, 0, &connectRsp);
@@ -813,7 +813,7 @@ static int32_t mndProcessSvrVerReq(SRpcMsg *pReq) {
   int32_t code = 0;
   int32_t lino = 0;
   SServerVerRsp rsp = {0};
-  tstrncpy(rsp.ver, version, sizeof(rsp.ver));
+  tstrncpy(rsp.ver, td_version, sizeof(rsp.ver));
 
   int32_t contLen = tSerializeSServerVerRsp(NULL, 0, &rsp);
   if (contLen < 0) {
@@ -248,7 +248,7 @@ static int32_t doAddSinkTask(SStreamObj* pStream, SMnode* pMnode, SVgObj* pVgrou
     return code;
   }
 
-  mDebug("doAddSinkTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory);
+  mDebug("doAddSinkTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId, isFillhistory);
 
   pTask->info.nodeId = pVgroup->vgId;
   pTask->info.epSet = mndGetVgroupEpset(pMnode, pVgroup);
@@ -364,12 +364,13 @@ static int32_t buildSourceTask(SStreamObj* pStream, SEpSet* pEpset, bool isFillh
 static void addNewTaskList(SStreamObj* pStream) {
   SArray* pTaskList = taosArrayInit(0, POINTER_BYTES);
   if (taosArrayPush(pStream->tasks, &pTaskList) == NULL) {
-    mError("failed to put array");
+    mError("failed to put into array");
   }
 
   if (pStream->conf.fillHistory) {
     pTaskList = taosArrayInit(0, POINTER_BYTES);
     if (taosArrayPush(pStream->pHTasksList, &pTaskList) == NULL) {
-      mError("failed to put array");
+      mError("failed to put into array");
     }
   }
 }
@@ -402,7 +403,8 @@ static int32_t doAddSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStre
     return code;
   }
 
-  mDebug("doAddSourceTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory);
+  mDebug("doAddSourceTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId,
+         isFillhistory);
 
   if (pStream->conf.fillHistory) {
     haltInitialTaskStatus(pTask, plan, isFillhistory);
@@ -512,19 +514,20 @@ static int32_t doAddAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan,
                             SSnodeObj* pSnode, bool isFillhistory, bool useTriggerParam) {
   int32_t code = 0;
   SStreamTask* pTask = NULL;
+  const char* id = NULL;
 
   code = buildAggTask(pStream, pEpset, isFillhistory, useTriggerParam, &pTask);
   if (code != TSDB_CODE_SUCCESS) {
    return code;
  }
 
+  id = pTask->id.idStr;
   if (pSnode != NULL) {
     code = mndAssignStreamTaskToSnode(pMnode, pTask, plan, pSnode);
-    mDebug("doAddAggTask taskId:%s, snode id:%d, isFillHistory:%d", pTask->id.idStr, pSnode->id, isFillhistory);
+    mDebug("doAddAggTask taskId:%s, %p snode id:%d, isFillHistory:%d", id, pTask, pSnode->id, isFillhistory);
 
   } else {
     code = mndAssignStreamTaskToVgroup(pMnode, pTask, plan, pVgroup);
-    mDebug("doAddAggTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory);
+    mDebug("doAddAggTask taskId:%s, %p vgId:%d, isFillHistory:%d", id, pTask, pVgroup->vgId, isFillhistory);
   }
   return code;
 }
@@ -678,7 +681,7 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan*
   if (numOfPlanLevel > 1 || externalTargetDB || multiTarget || pStream->fixedSinkVgId) {
     // add extra sink
     hasExtraSink = true;
-    int32_t code = addSinkTask(pMnode, pStream, pEpset);
+    code = addSinkTask(pMnode, pStream, pEpset);
     if (code != TSDB_CODE_SUCCESS) {
       return code;
     }
 
@@ -1294,9 +1294,10 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
       void* p = taosArrayPush(pList, &in);
       if (p) {
         int32_t currentSize = taosArrayGetSize(pList);
-        mDebug("stream:%s (uid:0x%" PRIx64 ") checkpoint interval beyond threshold: %ds(%" PRId64
-               "s) beyond concurrently launch threshold:%d",
-               pStream->name, pStream->uid, tsStreamCheckpointInterval, duration / 1000, currentSize);
+        mDebug("stream:%s (uid:0x%" PRIx64 ") total %d stream(s) beyond chpt interval threshold: %ds(%" PRId64
+               "s), concurrently launch threshold:%d",
+               pStream->name, pStream->uid, currentSize, tsStreamCheckpointInterval, duration / 1000,
+               tsMaxConcurrentCheckpoint);
       } else {
         mError("failed to record the checkpoint interval info, stream:0x%" PRIx64, pStream->uid);
       }
@@ -1348,7 +1349,7 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
       code = mndProcessStreamCheckpointTrans(pMnode, p, checkpointId, 1, true);
       sdbRelease(pSdb, p);
 
-      if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
+      if (code == 0 || code == TSDB_CODE_ACTION_IN_PROGRESS) {
        started += 1;
 
        if (started >= capacity) {
@@ -1356,6 +1357,8 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) {
                  (started + numOfCheckpointTrans));
           break;
         }
+      } else {
+        mError("failed to start checkpoint trans, code:%s", tstrerror(code));
       }
     }
   }
 
@@ -115,9 +115,9 @@ static char* mndBuildTelemetryReport(SMnode* pMnode) {
     snprintf(tmp, sizeof(tmp), "%" PRId64 " kB", tsTotalMemoryKB);
     TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), &lino, _OVER);
 
-    TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), &lino, _OVER);
-    TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), &lino, _OVER);
-    TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), &lino, _OVER);
+    TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), &lino, _OVER);
+    TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), &lino, _OVER);
+    TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), &lino, _OVER);
     TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "email", pMgmt->email), &lino, _OVER);
 
     mndBuildRuntimeInfo(pMnode, pJson);
 
@@ -39,7 +39,7 @@ TEST_F(MndTestProfile, 01_ConnectMsg) {
   strcpy(connectReq.db, "");
   strcpy(connectReq.user, "root");
   strcpy(connectReq.passwd, secretEncrypt);
-  strcpy(connectReq.sVer, version);
+  strcpy(connectReq.sVer, td_version);
 
   int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
   void* pReq = rpcMallocCont(contLen);
@@ -76,7 +76,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_NotExistDB) {
   strcpy(connectReq.db, "not_exist_db");
   strcpy(connectReq.user, "root");
   strcpy(connectReq.passwd, secretEncrypt);
-  strcpy(connectReq.sVer, version);
+  strcpy(connectReq.sVer, td_version);
 
   int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
   void* pReq = rpcMallocCont(contLen);
@@ -64,7 +64,7 @@ TEST_F(MndTestShow, 03_ShowMsg_Conn) {
   strcpy(connectReq.db, "");
   strcpy(connectReq.user, "root");
   strcpy(connectReq.passwd, secretEncrypt);
-  strcpy(connectReq.sVer, version);
+  strcpy(connectReq.sVer, td_version);
 
   int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq);
   void* pReq = rpcMallocCont(contLen);
@@ -38,24 +38,23 @@ int32_t sndBuildStreamTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProce
   streamTaskOpenAllUpstreamInput(pTask);
 
   streamTaskResetUpstreamStageInfo(pTask);
-  streamSetupScheduleTrigger(pTask);
 
   SCheckpointInfo *pChkInfo = &pTask->chkInfo;
   tqSetRestoreVersionInfo(pTask);
 
   char *p = streamTaskGetStatus(pTask).name;
   if (pTask->info.fillHistory) {
-    sndInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
+    sndInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64
             " nextProcessVer:%" PRId64
            " child id:%d, level:%d, status:%s fill-history:%d, related stream task:0x%x trigger:%" PRId64 " ms",
-            SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
+            SNODE_HANDLE, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
            pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory,
            (int32_t)pTask->streamTaskId.taskId, pTask->info.delaySchedParam);
   } else {
-    sndInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
+    sndInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64
            " nextProcessVer:%" PRId64
            " child id:%d, level:%d, status:%s fill-history:%d, related fill-task:0x%x trigger:%" PRId64 " ms",
-            SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
+            SNODE_HANDLE, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
            pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory,
            (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam);
   }
 
@@ -238,13 +238,18 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui
 }
 
 static void tdRSmaTaskInit(SStreamMeta *pMeta, SRSmaInfoItem *pItem, SStreamTaskId *pId) {
   STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId};
+  SStreamTask *pTask = NULL;
 
   streamMetaRLock(pMeta);
-  SStreamTask **ppTask = (SStreamTask **)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-  if (ppTask && *ppTask) {
-    pItem->submitReqVer = (*ppTask)->chkInfo.checkpointVer;
-    pItem->fetchResultVer = (*ppTask)->info.delaySchedParam;
+  int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
+  if (code == 0) {
+    pItem->submitReqVer = pTask->chkInfo.checkpointVer;
+    pItem->fetchResultVer = pTask->info.delaySchedParam;
+    streamMetaReleaseTask(pMeta, pTask);
  }
 
   streamMetaRUnLock(pMeta);
 }
 
@@ -765,7 +765,6 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV
   }
 
   streamTaskResetUpstreamStageInfo(pTask);
-  streamSetupScheduleTrigger(pTask);
 
   SCheckpointInfo* pChkInfo = &pTask->chkInfo;
   tqSetRestoreVersionInfo(pTask);
@@ -774,19 +773,19 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV
   const char* pNext = streamTaskGetStatusStr(pTask->status.taskStatus);
 
   if (pTask->info.fillHistory) {
-    tqInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
+    tqInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64
            " nextProcessVer:%" PRId64
           " child id:%d, level:%d, cur-status:%s, next-status:%s fill-history:%d, related stream task:0x%x "
          "delaySched:%" PRId64 " ms, inputVer:%" PRId64,
-           vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
+           vgId, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
          pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory,
          (int32_t)pTask->streamTaskId.taskId, pTask->info.delaySchedParam, nextProcessVer);
   } else {
-    tqInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
+    tqInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64
          " nextProcessVer:%" PRId64
          " child id:%d, level:%d, cur-status:%s next-status:%s fill-history:%d, related fill-task:0x%x "
          "delaySched:%" PRId64 " ms, inputVer:%" PRId64,
-           vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
+           vgId, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
          pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory,
          (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam, nextProcessVer);
 
@@ -1113,12 +1113,20 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
       break;
     }
 
-    SStreamTask* pTask = *(SStreamTask**)pIter;
-    if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && (pTask->exec.pExecutor != NULL)) {
-      int32_t code = qUpdateTableListForStreamScanner(pTask->exec.pExecutor, tbUidList, isAdd);
-      if (code != 0) {
-        tqError("vgId:%d, s-task:%s update qualified table error for stream task", vgId, pTask->id.idStr);
-        continue;
+    int64_t refId = *(int64_t*)pIter;
+    SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, refId);
+    if (pTask != NULL) {
+      int32_t taskId = pTask->id.taskId;
+
+      if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && (pTask->exec.pExecutor != NULL)) {
+        int32_t code = qUpdateTableListForStreamScanner(pTask->exec.pExecutor, tbUidList, isAdd);
+        if (code != 0) {
+          tqError("vgId:%d, s-task:0x%x update qualified table error for stream task", vgId, taskId);
+        }
+      }
+      int32_t ret = taosReleaseRef(streamTaskRefPool, refId);
+      if (ret) {
+        tqError("vgId:%d release task refId failed, refId:%" PRId64, vgId, refId);
       }
     }
   }
@@ -79,7 +79,7 @@ static void doStartScanWal(void* param, void* tmrId) {
 
   SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*)param;
 
-  SStreamMeta* pMeta = taosAcquireRef(streamMetaId, pParam->metaId);
+  SStreamMeta* pMeta = taosAcquireRef(streamMetaRefPool, pParam->metaId);
   if (pMeta == NULL) {
     tqError("metaRid:%" PRId64 " not valid now, stream meta has been freed", pParam->metaId);
     taosMemoryFree(pParam);
@@ -97,7 +97,7 @@ static void doStartScanWal(void* param, void* tmrId) {
     tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code));
   }
 
-  code = taosReleaseRef(streamMetaId, pParam->metaId);
+  code = taosReleaseRef(streamMetaRefPool, pParam->metaId);
   if (code) {
     tqError("vgId:%d failed to release ref for streamMeta, rid:%" PRId64 " code:%s", vgId, pParam->metaId,
             tstrerror(code));
@@ -685,19 +685,21 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b
       continue;
     }
 
     STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId};
-    SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-    if (ppTask == NULL) {
+    SStreamTask* pTask = NULL;
+
+    code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
+    if (code != 0) {
       tqError("vgId:%d failed to acquire task:0x%x in retrieving progress", pMeta->vgId, pId->taskId);
       continue;
     }
 
-    if ((*ppTask)->info.taskLevel != TASK_LEVEL__SOURCE) {
+    if (pTask->info.taskLevel != TASK_LEVEL__SOURCE) {
+      streamMetaReleaseTask(pMeta, pTask);
       continue;
     }
 
     // here we get the required stream source task
-    SStreamTask* pTask = *ppTask;
     *fhFinished = !HAS_RELATED_FILLHISTORY_TASK(pTask);
 
     int64_t ver = walReaderGetCurrentVer(pTask->exec.pWalReader);
@ -713,6 +715,7 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b
|
||||||
SWalReader* pReader = walOpenReader(pTask->exec.pWalReader->pWal, NULL, 0);
|
SWalReader* pReader = walOpenReader(pTask->exec.pWalReader->pWal, NULL, 0);
|
||||||
if (pReader == NULL) {
|
if (pReader == NULL) {
|
||||||
tqError("failed to open wal reader to extract exec progress, vgId:%d", pMeta->vgId);
|
tqError("failed to open wal reader to extract exec progress, vgId:%d", pMeta->vgId);
|
||||||
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -738,6 +741,7 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b
     }
 
     walCloseReader(pReader);
+    streamMetaReleaseTask(pMeta, pTask);
   }
 
   streamMetaRUnLock(pMeta);
 
@@ -138,13 +138,15 @@ int32_t tqStreamStartOneTaskAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t stream
 
 // this is to process request from transaction, always return true.
 int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored) {
   int32_t vgId = pMeta->vgId;
   char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
   int32_t len = pMsg->contLen - sizeof(SMsgHead);
   SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS};
   int64_t st = taosGetTimestampMs();
   bool updated = false;
   int32_t code = 0;
+  SStreamTask* pTask = NULL;
+  SStreamTask* pHTask = NULL;
 
   SStreamTaskNodeUpdateMsg req = {0};
 
@@ -170,9 +172,9 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
   streamMetaWLock(pMeta);
 
   // the task epset may be updated again and again, when replaying the WAL, the task may be in stop status.
   STaskId id = {.streamId = req.streamId, .taskId = req.taskId};
-  SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-  if (ppTask == NULL || *ppTask == NULL) {
+  code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
+  if (code != 0) {
     tqError("vgId:%d failed to acquire task:0x%x when handling update task epset, it may have been dropped", vgId,
             req.taskId);
     rsp.code = TSDB_CODE_SUCCESS;
@@ -181,12 +183,13 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
     return rsp.code;
   }
 
-  SStreamTask* pTask = *ppTask;
-  const char* idstr = pTask->id.idStr;
+  const char* idstr = pTask->id.idStr;
 
   if (req.transId <= 0) {
     tqError("vgId:%d invalid update nodeEp task, transId:%d, discard", vgId, req.taskId);
     rsp.code = TSDB_CODE_SUCCESS;
 
+    streamMetaReleaseTask(pMeta, pTask);
     streamMetaWUnLock(pMeta);
 
     taosArrayDestroy(req.pNodeList);
@@ -197,6 +200,8 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
   bool update = streamMetaInitUpdateTaskList(pMeta, req.transId);
   if (!update) {
     rsp.code = TSDB_CODE_SUCCESS;
 
+    streamMetaReleaseTask(pMeta, pTask);
     streamMetaWUnLock(pMeta);
 
     taosArrayDestroy(req.pNodeList);
@@ -211,7 +216,10 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
     tqDebug("s-task:%s (vgId:%d) already update in transId:%d, discard the nodeEp update msg", idstr, vgId,
             req.transId);
     rsp.code = TSDB_CODE_SUCCESS;
 
+    streamMetaReleaseTask(pMeta, pTask);
     streamMetaWUnLock(pMeta);
 
     taosArrayDestroy(req.pNodeList);
     return rsp.code;
   }
@@ -227,24 +235,23 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
 
   streamTaskStopMonitorCheckRsp(&pTask->taskCheckInfo, pTask->id.idStr);
 
-  SStreamTask** ppHTask = NULL;
   if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
-    ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
-    if (ppHTask == NULL || *ppHTask == NULL) {
+    code = streamMetaAcquireTaskUnsafe(pMeta, &pTask->hTaskInfo.id, &pHTask);
+    if (code != 0) {
      tqError(
          "vgId:%d failed to acquire fill-history task:0x%x when handling update, may have been dropped already, rel "
          "stream task:0x%x",
          vgId, (uint32_t)pTask->hTaskInfo.id.taskId, req.taskId);
       CLEAR_RELATED_FILLHISTORY_TASK(pTask);
     } else {
-      tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr);
-      bool updateEpSet = streamTaskUpdateEpsetInfo(*ppHTask, req.pNodeList);
+      tqDebug("s-task:%s fill-history task update nodeEp along with stream task", pHTask->id.idStr);
+      bool updateEpSet = streamTaskUpdateEpsetInfo(pHTask, req.pNodeList);
       if (updateEpSet) {
         updated = updateEpSet;
       }
 
-      streamTaskResetStatus(*ppHTask);
-      streamTaskStopMonitorCheckRsp(&(*ppHTask)->taskCheckInfo, (*ppHTask)->id.idStr);
+      streamTaskResetStatus(pHTask);
+      streamTaskStopMonitorCheckRsp(&pHTask->taskCheckInfo, pHTask->id.idStr);
     }
   }
 
||||||
|
@ -256,8 +263,8 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
|
||||||
tqError("s-task:%s vgId:%d failed to save task, code:%s", idstr, vgId, tstrerror(code));
|
tqError("s-task:%s vgId:%d failed to save task, code:%s", idstr, vgId, tstrerror(code));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ppHTask != NULL) {
|
if (pHTask != NULL) {
|
||||||
code = streamMetaSaveTask(pMeta, *ppHTask);
|
code = streamMetaSaveTask(pMeta, pHTask);
|
||||||
if (code) {
|
if (code) {
|
||||||
tqError("s-task:%s vgId:%d failed to save related history task, code:%s", idstr, vgId, tstrerror(code));
|
tqError("s-task:%s vgId:%d failed to save related history task, code:%s", idstr, vgId, tstrerror(code));
|
||||||
}
|
}
|
||||||
|
@ -271,15 +278,17 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
|
||||||
tqError("s-task:%s vgId:%d failed to stop task, code:%s", idstr, vgId, tstrerror(code));
|
tqError("s-task:%s vgId:%d failed to stop task, code:%s", idstr, vgId, tstrerror(code));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ppHTask != NULL) {
|
if (pHTask != NULL) {
|
||||||
code = streamTaskStop(*ppHTask);
|
code = streamTaskStop(pHTask);
|
||||||
if (code) {
|
if (code) {
|
||||||
tqError("s-task:%s vgId:%d failed to stop related history task, code:%s", idstr, vgId, tstrerror(code));
|
tqError("s-task:%s vgId:%d failed to stop related history task, code:%s", idstr, vgId, tstrerror(code));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// keep info
|
// keep info
|
||||||
streamMetaAddIntoUpdateTaskList(pMeta, pTask, (ppHTask != NULL) ? (*ppHTask) : NULL, req.transId, st);
|
streamMetaAddIntoUpdateTaskList(pMeta, pTask, (pHTask != NULL) ? (pHTask) : NULL, req.transId, st);
|
||||||
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
|
streamMetaReleaseTask(pMeta, pHTask);
|
||||||
|
|
||||||
rsp.code = TSDB_CODE_SUCCESS;
|
rsp.code = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
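The hunks above replace raw `taosHashGet` lookups of `SStreamTask**` with `streamMetaAcquireTaskUnsafe`, which hands back a reference-counted task that every exit path must pair with `streamMetaReleaseTask` (the "Unsafe" suffix appears to mean the caller already holds the meta lock). Below is a minimal standalone sketch of that acquire/release contract; the types and helper names are illustrative stand-ins, not TDengine's own:

```c
// Toy model of the acquire/release contract used by the patch above.
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  atomic_int refCount;  // when it drops to 0 the object may be freed
  int        taskId;
} Task;

// Acquire succeeds only while the object is still referenced somewhere.
static int taskAcquire(Task* t, Task** pOut) {
  int cur = atomic_load(&t->refCount);
  while (cur > 0) {
    if (atomic_compare_exchange_weak(&t->refCount, &cur, cur + 1)) {
      *pOut = t;
      return 0;  // caller now owns one reference
    }
  }
  return -1;     // object is already being torn down
}

static void taskRelease(Task* t) {
  if (atomic_fetch_sub(&t->refCount, 1) == 1) {
    printf("task 0x%x freed\n", t->taskId);
    free(t);
  }
}

int main(void) {
  Task* t = calloc(1, sizeof(Task));
  t->taskId = 0x1234;
  atomic_init(&t->refCount, 1);  // the owning map holds the initial reference

  Task* h = NULL;
  if (taskAcquire(t, &h) == 0) {  // mirrors: code = streamMetaAcquireTaskUnsafe(...)
    printf("working on task 0x%x\n", h->taskId);
    taskRelease(h);               // mirrors: streamMetaReleaseTask(pMeta, pTask)
  }
  taskRelease(t);                 // owner drops the map reference
  return 0;
}
```

The design point the diff keeps repeating: once a lookup returns a counted reference instead of a bare pointer, every early `return` in the function must release it, which is why each error branch above gains a `streamMetaReleaseTask` call.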
@@ -643,7 +652,6 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sversion
  if (code < 0) {
    tqError("failed to add s-task:0x%x into vgId:%d meta, existed:%d, code:%s", vgId, taskId, numOfTasks,
            tstrerror(code));
-    tFreeStreamTask(pTask);
    return code;
  }

@@ -673,7 +681,6 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sversion
    }
  } else {
    tqWarn("vgId:%d failed to add s-task:0x%x, since already exists in meta store, total:%d", vgId, taskId, numOfTasks);
-    tFreeStreamTask(pTask);
  }

  return code;

@@ -681,25 +688,25 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sversion

int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen) {
  SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
+  int32_t              code = 0;
+  int32_t              vgId = pMeta->vgId;
+  STaskId              hTaskId = {0};
+  SStreamTask*         pTask = NULL;

-  int32_t code = 0;
-  int32_t vgId = pMeta->vgId;
-  STaskId hTaskId = {0};
  tqDebug("vgId:%d receive msg to drop s-task:0x%x", vgId, pReq->taskId);

  streamMetaWLock(pMeta);

  STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
-  SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-  if ((ppTask != NULL) && ((*ppTask) != NULL)) {
-    int32_t unusedRetRef = streamMetaAcquireOneTask(*ppTask);
-    SStreamTask* pTask = *ppTask;
+  code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
+  if (code == 0) {

    if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
      hTaskId.streamId = pTask->hTaskInfo.id.streamId;
      hTaskId.taskId = pTask->hTaskInfo.id.taskId;
    }

+    // clear the relationship, and then release the stream tasks, to avoid invalid accessing of already freed
+    // related stream(history) task
    streamTaskSetRemoveBackendFiles(pTask);
    code = streamTaskClearHTaskAttr(pTask, pReq->resetRelHalt);
    streamMetaReleaseTask(pMeta, pTask);

@@ -742,18 +749,19 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen

int32_t tqStreamTaskProcessUpdateCheckpointReq(SStreamMeta* pMeta, bool restored, char* msg) {
  SVUpdateCheckpointInfoReq* pReq = (SVUpdateCheckpointInfoReq*)msg;
+  int32_t      code = 0;
+  int32_t      vgId = pMeta->vgId;
+  SStreamTask* pTask = NULL;

-  int32_t code = 0;
-  int32_t vgId = pMeta->vgId;
  tqDebug("vgId:%d receive msg to update-checkpoint-info for s-task:0x%x", vgId, pReq->taskId);

  streamMetaWLock(pMeta);

  STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
-  SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-  if (ppTask != NULL && (*ppTask) != NULL) {
-    code = streamTaskUpdateTaskCheckpointInfo(*ppTask, restored, pReq);
+  code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
+  if (code == 0) {
+    code = streamTaskUpdateTaskCheckpointInfo(pTask, restored, pReq);
+    streamMetaReleaseTask(pMeta, pTask);
  } else {  // failed to get the task.
    int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
    tqError(

@@ -763,7 +771,6 @@ int32_t tqStreamTaskProcessUpdateCheckpointReq(SStreamMeta* pMeta, bool restored
  }

  streamMetaWUnLock(pMeta);

  // always return success when handling the requirement issued by mnode during transaction.
  return TSDB_CODE_SUCCESS;
}

@@ -789,11 +796,6 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
  tqInfo("vgId:%d tasks are all updated and stopped, restart all tasks, triggered by transId:%d, ts:%" PRId64, vgId,
         pMeta->updateInfo.completeTransId, pMeta->updateInfo.completeTs);

-  while (streamMetaTaskInTimer(pMeta)) {
-    tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
-    taosMsleep(100);
-  }
-
  streamMetaWLock(pMeta);
  streamMetaClear(pMeta);

@@ -1217,7 +1217,7 @@ int32_t udfdOpenClientRpc() {
  connLimitNum = TMIN(connLimitNum, 500);
  rpcInit.connLimitNum = connLimitNum;
  rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
-  TAOS_CHECK_RETURN(taosVersionStrToInt(version, &(rpcInit.compatibilityVer)));
+  TAOS_CHECK_RETURN(taosVersionStrToInt(td_version, &rpcInit.compatibilityVer));
  global.clientRpc = rpcOpen(&rpcInit);
  if (global.clientRpc == NULL) {
    fnError("failed to init dnode rpc client");

@@ -1470,9 +1470,9 @@ static int32_t udfdParseArgs(int32_t argc, char *argv[]) {
}

static void udfdPrintVersion() {
-  (void)printf("udfd version: %s compatible_version: %s\n", version, compatible_version);
-  (void)printf("git: %s\n", gitinfo);
-  (void)printf("build: %s\n", buildinfo);
+  (void)printf("udfd version: %s compatible_version: %s\n", td_version, td_compatible_version);
+  (void)printf("git: %s\n", td_gitinfo);
+  (void)printf("build: %s\n", td_buildinfo);
}

static int32_t udfdInitLog() {

@@ -2902,7 +2902,7 @@ static int32_t rewriteDatabaseFunc(STranslateContext* pCxt, SNode** pNode) {
}

static int32_t rewriteClentVersionFunc(STranslateContext* pCxt, SNode** pNode) {
-  char* pVer = taosStrdup((void*)version);
+  char* pVer = taosStrdup((void*)td_version);
  if (NULL == pVer) {
    return terrno;
  }

@@ -21,6 +21,7 @@
#include "streamBackendRocksdb.h"
#include "trpc.h"
#include "tstream.h"
+#include "tref.h"

#ifdef __cplusplus
extern "C" {

@@ -70,7 +71,7 @@ struct SActiveCheckpointInfo {
  SStreamTmrInfo chkptReadyMsgTmr;
};

-int32_t streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, SStreamTask* pTask);
+void streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, void* param);

typedef struct {
  int8_t type;

@@ -225,6 +226,8 @@ void destroyMetaHbInfo(SMetaHbInfo* pInfo);
void streamMetaWaitForHbTmrQuit(SStreamMeta* pMeta);
void streamMetaGetHbSendInfo(SMetaHbInfo* pInfo, int64_t* pStartTs, int32_t* pSendCount);
int32_t streamMetaSendHbHelper(SStreamMeta* pMeta);
+int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid);
+void metaRefMgtRemove(int64_t* pRefId);

ECHECKPOINT_BACKUP_TYPE streamGetCheckpointBackupType();

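The header changes above split the single `streamMetaId` into two pools, `streamMetaRefPool` and `streamTaskRefPool`, add the `metaRefMgtAdd`/`metaRefMgtRemove` helpers, and pull in `tref.h`. The pools are presumably backed by TDengine's tref facility, where an object is registered once and afterwards looked up only by a 64-bit id. The toy pool below shows the idea only; it is not the tref API, and the capacity, names, and linear scan are all simplifications:

```c
// Toy id-based reference pool: timers carry an integer id, never a raw pointer.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POOL_CAP 16

typedef struct {
  int64_t id;   // 0 means the slot is empty
  void*   obj;
  int     refs;
} RefSlot;

static RefSlot pool[POOL_CAP];
static int64_t nextId = 1;

static int64_t refAdd(void* obj) {  // register an object, refs = 1
  for (int i = 0; i < POOL_CAP; i++) {
    if (pool[i].id == 0) {
      pool[i] = (RefSlot){.id = nextId++, .obj = obj, .refs = 1};
      return pool[i].id;
    }
  }
  return -1;
}

static void* refAcquire(int64_t id) {  // NULL if the object is already gone
  for (int i = 0; i < POOL_CAP; i++) {
    if (pool[i].id == id) { pool[i].refs++; return pool[i].obj; }
  }
  return NULL;
}

static void refRelease(int64_t id) {
  for (int i = 0; i < POOL_CAP; i++) {
    if (pool[i].id == id && --pool[i].refs == 0) memset(&pool[i], 0, sizeof(pool[i]));
  }
}

int main(void) {
  int     task = 42;
  int64_t rid  = refAdd(&task);

  int* p = refAcquire(rid);  // a timer callback would do this on entry
  if (p != NULL) {
    printf("acquired task %d via rid %lld\n", *p, (long long)rid);
    refRelease(rid);         // balance the acquire
  }
  refRelease(rid);           // owner drops the last reference
  printf("acquire after removal: %p\n", refAcquire(rid));  // NULL now
  return 0;
}
```

The payoff, visible throughout the hunks that follow: a stale id simply fails to resolve, whereas a stale pointer dereferences freed memory.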
@@ -299,13 +299,14 @@ void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) {
    return;
  }

-  int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);  // add task ref here
  streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs());

-  int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
-  stDebug("s-task:%s start check-rsp monitor, ref:%d ", pTask->id.idStr, ref);
-  streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTask, streamTimer, &pInfo->checkRspTmr, vgId,
-                 "check-status-monitor");
+  int64_t* pTaskRefId = NULL;
+  code = streamTaskAllocRefId(pTask, &pTaskRefId);
+  if (code == 0) {
+    streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTaskRefId, streamTimer, &pInfo->checkRspTmr, vgId,
+                   "check-status-monitor");
+  }

  streamMutexUnlock(&pInfo->checkInfoLock);
}

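The hunk above shows the new arming sequence: instead of handing `streamTmrStart` the task pointer, `streamTaskStartMonitorCheckRsp` allocates a small heap cell holding the task's ref id (`streamTaskAllocRefId`) and arms the timer only when that allocation succeeds. A runnable toy version of the handoff follows; the timer here fires inline to keep it self-contained, and every name other than the pattern itself is a stand-in:

```c
// Sketch: arm a timer with a heap-allocated ref id instead of a raw pointer.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef void (*TmrFn)(void* param, void* tmrId);

// "Allocate" a ref-id cell for a task (here, a fixed id).
static int allocRefId(int64_t rid, int64_t** ppCell) {
  *ppCell = malloc(sizeof(int64_t));
  if (*ppCell == NULL) return -1;
  **ppCell = rid;
  return 0;
}

// Model of streamTmrStart: a real timer fires later on another thread;
// the callback runs inline here so the sketch stays runnable.
static void tmrStart(TmrFn fn, void* param) { fn(param, NULL); }

static void monitorFn(void* param, void* tmrId) {
  (void)tmrId;
  int64_t rid = *(int64_t*)param;  // the callback sees only the id ...
  printf("monitor fired for rid %lld\n", (long long)rid);
  free(param);                     // ... and owns the heap cell's lifetime
}

int main(void) {
  int64_t* pCell = NULL;
  if (allocRefId(0x5001, &pCell) == 0) {  // mirrors streamTaskAllocRefId()
    tmrStart(monitorFn, pCell);           // mirrors streamTmrStart(..., pTaskRefId, ...)
  }                                       // on failure the timer is simply not armed
  return 0;
}
```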
@@ -721,21 +722,45 @@ int32_t addDownstreamFailedStatusResultAsync(SMsgCb* pMsgCb, int32_t vgId, int64
  return streamTaskSchedTask(pMsgCb, vgId, streamId, taskId, STREAM_EXEC_T_ADD_FAILED_TASK);
}

+static void doCleanup(SStreamTask* pTask, SArray* pNotReadyList, SArray* pTimeoutList, void* param) {
+  streamMetaReleaseTask(pTask->pMeta, pTask);
+
+  taosArrayDestroy(pNotReadyList);
+  taosArrayDestroy(pTimeoutList);
+  streamTaskFreeRefId(param);
+}
+
// this function is executed in timer thread
void rspMonitorFn(void* param, void* tmrId) {
-  SStreamTask*    pTask = param;
-  SStreamMeta*    pMeta = pTask->pMeta;
-  STaskCheckInfo* pInfo = &pTask->taskCheckInfo;
-  int32_t         vgId = pTask->pMeta->vgId;
-  int64_t         now = taosGetTimestampMs();
-  int64_t         timeoutDuration = now - pInfo->timeoutStartTs;
-  const char*     id = pTask->id.idStr;
  int32_t         numOfReady = 0;
  int32_t         numOfFault = 0;
  int32_t         numOfNotRsp = 0;
  int32_t         numOfNotReady = 0;
  int32_t         numOfTimeout = 0;
-  int32_t         total = taosArrayGetSize(pInfo->pList);
+  int64_t         taskRefId = *(int64_t*)param;
+  int64_t         now = taosGetTimestampMs();
+  SArray*         pNotReadyList = NULL;
+  SArray*         pTimeoutList = NULL;
+  SStreamMeta*    pMeta = NULL;
+  STaskCheckInfo* pInfo = NULL;
+  int32_t         vgId = -1;
+  int64_t         timeoutDuration = 0;
+  const char*     id = NULL;
+  int32_t         total = 0;
+
+  SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId);
+  if (pTask == NULL) {
+    stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__);
+    streamTaskFreeRefId(param);
+    return;
+  }
+
+  pMeta = pTask->pMeta;
+  pInfo = &pTask->taskCheckInfo;
+  vgId = pTask->pMeta->vgId;
+  timeoutDuration = now - pInfo->timeoutStartTs;
+  id = pTask->id.idStr;
+  total = (int32_t) taosArrayGetSize(pInfo->pList);

  stDebug("s-task:%s start to do check-downstream-rsp check in tmr", id);

@@ -744,12 +769,10 @@ void rspMonitorFn(void* param, void* tmrId) {
  streamMutexUnlock(&pTask->lock);

  if (state.state == TASK_STATUS__STOP) {
-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr, ref:%d", id, state.name, vgId, ref);
+    stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr", id, state.name, vgId);

    streamTaskCompleteCheckRsp(pInfo, true, id);

-    // not record the failed of the current task if try to close current vnode
+    // not record the failure of the current task if try to close current vnode
    // otherwise, the put of message operation may incur invalid read of message queue.
    if (!pMeta->closeFlag) {
      int32_t code = addDownstreamFailedStatusResultAsync(pTask->pMsgCb, vgId, pTask->id.streamId, pTask->id.taskId);

@@ -758,33 +781,30 @@ void rspMonitorFn(void* param, void* tmrId) {
      }
    }

-    streamMetaReleaseTask(pMeta, pTask);
+    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

  if (state.state == TASK_STATUS__DROPPING || state.state == TASK_STATUS__READY) {
-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr, ref:%d", id, state.name, vgId, ref);
+    stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr", id, state.name, vgId);

    streamTaskCompleteCheckRsp(pInfo, true, id);
-    streamMetaReleaseTask(pMeta, pTask);
+    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

  streamMutexLock(&pInfo->checkInfoLock);
  if (pInfo->notReadyTasks == 0) {
-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s status:%s vgId:%d all downstream ready, quit from monitor rsp tmr, ref:%d", id, state.name, vgId,
-            ref);
+    stDebug("s-task:%s status:%s vgId:%d all downstream ready, quit from monitor rsp tmr", id, state.name, vgId);

    streamTaskCompleteCheckRsp(pInfo, false, id);
    streamMutexUnlock(&pInfo->checkInfoLock);
-    streamMetaReleaseTask(pMeta, pTask);
+    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

-  SArray* pNotReadyList = taosArrayInit(4, sizeof(int64_t));
-  SArray* pTimeoutList = taosArrayInit(4, sizeof(int64_t));
+  pNotReadyList = taosArrayInit(4, sizeof(int64_t));
+  pTimeoutList = taosArrayInit(4, sizeof(int64_t));

  if (state.state == TASK_STATUS__UNINIT) {
    getCheckRspStatus(pInfo, timeoutDuration, &numOfReady, &numOfFault, &numOfNotRsp, pTimeoutList, pNotReadyList, id);

@@ -795,31 +815,25 @@ void rspMonitorFn(void* param, void* tmrId) {
    // fault tasks detected, not try anymore
    bool jumpOut = false;
    if ((numOfReady + numOfFault + numOfNotReady + numOfTimeout + numOfNotRsp) != total) {
-      int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-
      stError(
          "s-task:%s vgId:%d internal error in handling the check downstream procedure, rsp number is inconsistent, "
-          "stop rspMonitor tmr, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d ref:%d",
-          id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady, ref);
+          "stop rspMonitor tmr, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d",
+          id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);
      jumpOut = true;
    }

    if (numOfFault > 0) {
-      int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
      stDebug(
          "s-task:%s status:%s vgId:%d all rsp. quit from monitor rsp tmr, since vnode-transfer/leader-change/restart "
-          "detected, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d ref:%d",
-          id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady, ref);
+          "detected, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d",
+          id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);
      jumpOut = true;
    }

    if (jumpOut) {
      streamTaskCompleteCheckRsp(pInfo, false, id);
      streamMutexUnlock(&pInfo->checkInfoLock);
-      streamMetaReleaseTask(pMeta, pTask);
-
-      taosArrayDestroy(pNotReadyList);
-      taosArrayDestroy(pTimeoutList);
+      doCleanup(pTask, pNotReadyList, pTimeoutList, param);
      return;
    }
  } else {  // unexpected status

@@ -828,11 +842,10 @@ void rspMonitorFn(void* param, void* tmrId) {

  // checking of downstream tasks has been stopped by other threads
  if (pInfo->stopCheckProcess == 1) {
-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
    stDebug(
        "s-task:%s status:%s vgId:%d stopped by other threads to check downstream process, total:%d, notRsp:%d, "
-        "notReady:%d, fault:%d, timeout:%d, ready:%d ref:%d",
-        id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady, ref);
+        "notReady:%d, fault:%d, timeout:%d, ready:%d",
+        id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);

    streamTaskCompleteCheckRsp(pInfo, false, id);
    streamMutexUnlock(&pInfo->checkInfoLock);

@@ -842,10 +855,7 @@ void rspMonitorFn(void* param, void* tmrId) {
      stError("s-task:%s failed to create async record start failed task, code:%s", id, tstrerror(code));
    }

-    streamMetaReleaseTask(pMeta, pTask);
-
-    taosArrayDestroy(pNotReadyList);
-    taosArrayDestroy(pTimeoutList);
+    doCleanup(pTask, pNotReadyList, pTimeoutList, param);
    return;
  }

@@ -857,7 +867,7 @@ void rspMonitorFn(void* param, void* tmrId) {
    handleTimeoutDownstreamTasks(pTask, pTimeoutList);
  }

-  streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTask, streamTimer, &pInfo->checkRspTmr, vgId,
+  streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, param, streamTimer, &pInfo->checkRspTmr, vgId,
                 "check-status-monitor");
  streamMutexUnlock(&pInfo->checkInfoLock);

@@ -865,7 +875,5 @@ void rspMonitorFn(void* param, void* tmrId) {
      "s-task:%s vgId:%d continue checking rsp in 300ms, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, "
      "ready:%d",
      id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady);
-
-  taosArrayDestroy(pNotReadyList);
-  taosArrayDestroy(pTimeoutList);
+  doCleanup(pTask, pNotReadyList, pTimeoutList, NULL);
}

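The reworked `rspMonitorFn` above follows one shape: resolve the id via `taosAcquireRef(streamTaskRefPool, taskRefId)` first, bail out and free the id cell if the task is already gone, and funnel every other exit through a single `doCleanup` helper so the release and the free cannot be missed on any branch. A compressed, runnable model of that control flow (the helper bodies are stand-ins, and `acquireByRid` always fails here just to exercise the early-out path):

```c
// Condensed shape of a ref-id-driven timer callback with one cleanup exit.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int taskId; } Task;

static Task* acquireByRid(int64_t rid) { (void)rid; return NULL; /* stand-in */ }
static void  releaseTask(Task* t)      { (void)t; }
static void  freeRefId(void* param)    { free(param); }

static void doCleanup(Task* t, void* param) {
  releaseTask(t);    // balance the acquire done at function entry
  freeRefId(param);  // the timer owned the heap id cell
}

static void rspMonitorFn(void* param, void* tmrId) {
  (void)tmrId;
  int64_t rid  = *(int64_t*)param;
  Task*   task = acquireByRid(rid);
  if (task == NULL) {  // task dropped while the timer was pending
    printf("rid %lld no longer valid\n", (long long)rid);
    freeRefId(param);
    return;
  }
  // ... status checks; every early return in the real code calls doCleanup ...
  doCleanup(task, param);
}

int main(void) {
  int64_t* cell = malloc(sizeof(int64_t));
  *cell = 77;
  rspMonitorFn(cell, NULL);
  return 0;
}
```

Note the one deliberate exception in the diff: when the timer is re-armed, `doCleanup` is called with `NULL` as the param, because ownership of the id cell has been handed back to the next timer firing.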
@@ -345,13 +345,15 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
  SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr;
  int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1);
  if (old == 0) {
-    int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref);
+    stDebug("s-task:%s start checkpoint-trigger monitor in 10s", pTask->id.idStr);

-    int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
-    streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
-                   "trigger-recv-monitor");
-    pTmrInfo->launchChkptId = pActiveInfo->activeId;
+    int64_t* pTaskRefId = NULL;
+    code = streamTaskAllocRefId(pTask, &pTaskRefId);
+    if (code == 0) {
+      streamTmrStart(checkpointTriggerMonitorFn, 200, pTaskRefId, streamTimer, &pTmrInfo->tmrHandle, vgId,
+                     "trigger-recv-monitor");
+      pTmrInfo->launchChkptId = pActiveInfo->activeId;
+    }
  } else {  // already launched, do nothing
    stError("s-task:%s previous checkpoint-trigger monitor tmr is set, not start new one", pTask->id.idStr);
  }

@@ -890,7 +892,7 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
  return code;
}

-static int32_t doChkptStatusCheck(SStreamTask* pTask) {
+static int32_t doChkptStatusCheck(SStreamTask* pTask, void* param) {
  const char* id = pTask->id.idStr;
  int32_t vgId = pTask->pMeta->vgId;
  SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;

@@ -898,25 +900,24 @@ static int32_t doChkptStatusCheck(SStreamTask* pTask, void* param) {

  // checkpoint-trigger recv flag is set, quit
  if (pActiveInfo->allUpstreamTriggerRecv) {
-    int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
-    stDebug("s-task:%s vgId:%d all checkpoint-trigger recv, quit from monitor checkpoint-trigger, ref:%d", id, vgId,
-            ref);
+    streamCleanBeforeQuitTmr(pTmrInfo, param);
+    stDebug("s-task:%s vgId:%d all checkpoint-trigger recv, quit from monitor checkpoint-trigger", id, vgId);
    return -1;
  }

  if (pTmrInfo->launchChkptId != pActiveInfo->activeId) {
-    int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
+    streamCleanBeforeQuitTmr(pTmrInfo, param);
    stWarn("s-task:%s vgId:%d checkpoint-trigger retrieve by previous checkpoint procedure, checkpointId:%" PRId64
-           ", quit, ref:%d",
-           id, vgId, pTmrInfo->launchChkptId, ref);
+           ", quit",
+           id, vgId, pTmrInfo->launchChkptId);
    return -1;
  }

  // active checkpoint info is cleared for now
  if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (pTask->chkInfo.startTs == 0)) {
-    int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
-    stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from retrieve checkpoint-trigger send tmr, ref:%d",
-           id, vgId, ref);
+    streamCleanBeforeQuitTmr(pTmrInfo, param);
+    stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from retrieve checkpoint-trigger send tmr", id,
+           vgId);
    return -1;
  }

@@ -964,22 +965,22 @@ static int32_t doFindNotSendUpstream(SStreamTask* pTask, SArray* pList, SArray**
  return 0;
}

-static int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, SArray* pNotSendList) {
+static int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, void* param, SArray* pNotSendList) {
  const char*            id = pTask->id.idStr;
  SArray*                pList = pTask->upstreamInfo.pList;  // send msg to retrieve checkpoint trigger msg
  SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
  SStreamTmrInfo*        pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr;
  int32_t                vgId = pTask->pMeta->vgId;

-  int32_t code = doChkptStatusCheck(pTask);
+  int32_t code = doChkptStatusCheck(pTask, param);
  if (code) {
    return code;
  }

  code = doFindNotSendUpstream(pTask, pList, &pNotSendList);
  if (code) {
-    int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
-    stDebug("s-task:%s failed to find not send upstream, code:%s, out of tmr, ref:%d", id, tstrerror(code), ref);
+    streamCleanBeforeQuitTmr(pTmrInfo, param);
+    stDebug("s-task:%s failed to find not send upstream, code:%s, out of tmr", id, tstrerror(code));
    return code;
  }

@@ -993,37 +994,50 @@ static int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, void* param, SArray* pNotSendList) {
  return code;
}

+static void doCleanup(SStreamTask* pTask, SArray* pList) {
+  streamMetaReleaseTask(pTask->pMeta, pTask);
+  taosArrayDestroy(pList);
+}
+
void checkpointTriggerMonitorFn(void* param, void* tmrId) {
-  SStreamTask* pTask = param;
-  int32_t      vgId = pTask->pMeta->vgId;
-  int64_t      now = taosGetTimestampMs();
-  const char*  id = pTask->id.idStr;
-  SArray*      pNotSendList = NULL;
-  SArray*      pList = pTask->upstreamInfo.pList;  // send msg to retrieve checkpoint trigger msg
  int32_t      code = 0;
  int32_t      numOfNotSend = 0;
+  SArray*      pNotSendList = NULL;
+  int64_t      taskRefId = *(int64_t*)param;
+  int64_t      now = taosGetTimestampMs();
+
+  SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId);
+  if (pTask == NULL) {
+    stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__);
+    streamTaskFreeRefId(param);
+    return;
+  }
+
+  int32_t     vgId = pTask->pMeta->vgId;
+  const char* id = pTask->id.idStr;
+  SArray*     pList = pTask->upstreamInfo.pList;  // send msg to retrieve checkpoint trigger msg
  SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
  SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr;

  if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
-    int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
-    stError("s-task:%s source task should not start the checkpoint-trigger monitor fn, ref:%d quit", id, ref);
-    streamMetaReleaseTask(pTask->pMeta, pTask);
+    streamCleanBeforeQuitTmr(pTmrInfo, param);
+    stError("s-task:%s source task should not start the checkpoint-trigger monitor fn, quit", id);
+    doCleanup(pTask, pNotSendList);
    return;
  }

  // check the status every 100ms
  if (streamTaskShouldStop(pTask)) {
-    int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
-    stDebug("s-task:%s vgId:%d quit from monitor checkpoint-trigger, ref:%d", id, vgId, ref);
-    streamMetaReleaseTask(pTask->pMeta, pTask);
+    streamCleanBeforeQuitTmr(pTmrInfo, param);
+    stDebug("s-task:%s vgId:%d quit from monitor checkpoint-trigger", id, vgId);
+    doCleanup(pTask, pNotSendList);
    return;
  }

  if (++pTmrInfo->activeCounter < 50) {
-    streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
+    streamTmrStart(checkpointTriggerMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId,
                   "trigger-recv-monitor");
+    doCleanup(pTask, pNotSendList);
    return;
  }

@@ -1035,20 +1049,19 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) {
  streamMutexUnlock(&pTask->lock);

  if (state.state != TASK_STATUS__CK) {
-    int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
-    stDebug("s-task:%s vgId:%d status:%s not in checkpoint status, quit from monitor checkpoint-trigger, ref:%d", id,
-            vgId, state.name, ref);
-    streamMetaReleaseTask(pTask->pMeta, pTask);
+    streamCleanBeforeQuitTmr(pTmrInfo, param);
+    stDebug("s-task:%s vgId:%d status:%s not in checkpoint status, quit from monitor checkpoint-trigger", id,
+            vgId, state.name);
+    doCleanup(pTask, pNotSendList);
    return;
  }

  streamMutexLock(&pActiveInfo->lock);
-  code = chkptTriggerRecvMonitorHelper(pTask, pNotSendList);
+  code = chkptTriggerRecvMonitorHelper(pTask, param, pNotSendList);
  streamMutexUnlock(&pActiveInfo->lock);

  if (code != TSDB_CODE_SUCCESS) {
-    streamMetaReleaseTask(pTask->pMeta, pTask);
-    taosArrayDestroy(pNotSendList);
+    doCleanup(pTask, pNotSendList);
    return;
  }

@@ -1056,15 +1069,14 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) {
  numOfNotSend = taosArrayGetSize(pNotSendList);
  if (numOfNotSend > 0) {
    stDebug("s-task:%s start to monitor checkpoint-trigger in 10s", id);
-    streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
+    streamTmrStart(checkpointTriggerMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId,
                   "trigger-recv-monitor");
  } else {
-    int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
-    stDebug("s-task:%s all checkpoint-trigger recved, quit from monitor checkpoint-trigger tmr, ref:%d", id, ref);
-    streamMetaReleaseTask(pTask->pMeta, pTask);
+    streamCleanBeforeQuitTmr(pTmrInfo, param);
+    stDebug("s-task:%s all checkpoint-trigger recved, quit from monitor checkpoint-trigger tmr", id);
  }

-  taosArrayDestroy(pNotSendList);
+  doCleanup(pTask, pNotSendList);
}

int32_t doSendRetrieveTriggerMsg(SStreamTask* pTask, SArray* pNotSendList) {

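A recurring change in the checkpoint hunks is the retooled quit helper: `streamCleanBeforeQuitTmr` used to return the decremented `timerActive` counter so callers could log `ref:%d`, and now it returns `void` and takes the timer's `param` so it can dispose of the ref-id cell itself. The sketch below is only a rough guess at its shape; the `TmrInfo` fields shown are an assumed subset, and the exact reset behavior is not confirmed by the diff:

```c
// Guessed shape of the void-returning quit helper that consumes the id cell.
#include <stdlib.h>

typedef struct {
  volatile char isActive;   // cleared so the timer can be re-armed later
  int           activeCounter;
  long long     launchChkptId;
} TmrInfo;

static void freeRefId(void* param) { free(param); }

static void cleanBeforeQuitTmr(TmrInfo* pInfo, void* param) {
  pInfo->activeCounter = 0;
  pInfo->launchChkptId = 0;
  pInfo->isActive      = 0;  // in the real code this is presumably an atomic store
  freeRefId(param);          // the id cell handed to the timer ends its life here
}

int main(void) {
  TmrInfo    info = {.isActive = 1, .activeCounter = 7, .launchChkptId = 9};
  long long* cell = malloc(sizeof(long long));
  cleanBeforeQuitTmr(&info, cell);
  return 0;
}
```

Whatever the exact body, the signature change explains why every call site above loses its `ref` local and its `ref:%d` log field.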
@@ -518,45 +518,66 @@ static void doSendFailedDispatch(SStreamTask* pTask, SDispatchEntry* pEntry, int
  }
}

+static void cleanupInMonitor(int32_t taskId, int64_t taskRefId, void* param) {
+  int32_t ret = taosReleaseRef(streamTaskRefPool, taskRefId);
+  if (ret) {
+    stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, taskRefId);
+  }
+  streamTaskFreeRefId(param);
+}
+
static void doMonitorDispatchData(void* param, void* tmrId) {
-  SStreamTask*      pTask = param;
-  const char*       id = pTask->id.idStr;
-  int32_t           vgId = pTask->pMeta->vgId;
-  SDispatchMsgInfo* pMsgInfo = &pTask->msgInfo;
-  int32_t           msgId = pMsgInfo->msgId;
  int32_t           code = 0;
  int64_t           now = taosGetTimestampMs();
  bool              inDispatch = true;
+  SStreamTask*      pTask = NULL;
+  int64_t           taskRefId = *(int64_t*)param;
+  const char*       id = NULL;
+  int32_t           vgId = -1;
+  SDispatchMsgInfo* pMsgInfo = NULL;
+  int32_t           msgId = -1;

-  stDebug("s-task:%s start monitor dispatch data", id);
+  pTask = taosAcquireRef(streamTaskRefPool, taskRefId);
+  if (pTask == NULL) {
+    stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__);
+    streamTaskFreeRefId(param);
+    return;
+  }
+
+  id = pTask->id.idStr;
+  vgId = pTask->pMeta->vgId;
+  pMsgInfo = &pTask->msgInfo;
+  msgId = pMsgInfo->msgId;
+
+  stDebug("s-task:%s start to monitor dispatch data", id);

  if (streamTaskShouldStop(pTask)) {
-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
+    stDebug("s-task:%s should stop, abort from timer", pTask->id.idStr);
    setNotInDispatchMonitor(pMsgInfo);
+    cleanupInMonitor(pTask->id.taskId, taskRefId, param);
    return;
  }

  // slave task not handle the dispatch, downstream not ready will break the monitor timer
  // follower not handle the dispatch rsp
  if ((pTask->pMeta->role == NODE_ROLE_FOLLOWER) || (pTask->status.downstreamReady != 1)) {
-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stError("s-task:%s vgId:%d follower or downstream not ready, jump out of monitor tmr, ref:%d", id, vgId, ref);
+    stError("s-task:%s vgId:%d follower or downstream not ready, jump out of monitor tmr", id, vgId);
    setNotInDispatchMonitor(pMsgInfo);
+    cleanupInMonitor(pTask->id.taskId, taskRefId, param);
    return;
  }

  streamMutexLock(&pMsgInfo->lock);
  if (pTask->outputq.status == TASK_OUTPUT_STATUS__NORMAL) {
-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s not in dispatch procedure, abort from timer, ref:%d", pTask->id.idStr, ref);
+    stDebug("s-task:%s not in dispatch procedure, abort from timer", pTask->id.idStr);

    pMsgInfo->inMonitor = 0;
    inDispatch = false;
  }

  streamMutexUnlock(&pMsgInfo->lock);

  if (!inDispatch) {
+    cleanupInMonitor(pTask->id.taskId, taskRefId, param);
    return;
  }

@@ -564,6 +585,7 @@ static void doMonitorDispatchData(void* param, void* tmrId) {
  if (numOfFailed == 0) {
    stDebug("s-task:%s no error occurs, check again in %dms", id, DISPATCH_RETRY_INTERVAL_MS);
    streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS);
+    cleanupInMonitor(pTask->id.taskId, taskRefId, param);
    return;
  }

@@ -628,18 +650,23 @@ static void doMonitorDispatchData(void* param, void* tmrId) {
  }

  if (streamTaskShouldStop(pTask)) {
-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
+    stDebug("s-task:%s should stop, abort from timer", pTask->id.idStr);
    setNotInDispatchMonitor(pMsgInfo);
  } else {
    streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS);
  }

+  cleanupInMonitor(pTask->id.taskId, taskRefId, param);
}

void streamStartMonitorDispatchData(SStreamTask* pTask, int64_t waitDuration) {
  int32_t vgId = pTask->pMeta->vgId;
-  streamTmrStart(doMonitorDispatchData, waitDuration, pTask, streamTimer, &pTask->msgInfo.pRetryTmr, vgId,
-                 "dispatch-monitor");
+  int64_t* pTaskRefId = NULL;
+  int32_t  code = streamTaskAllocRefId(pTask, &pTaskRefId);
+  if (code == 0) {
+    streamTmrStart(doMonitorDispatchData, waitDuration, pTaskRefId, streamTimer, &pTask->msgInfo.pRetryTmr, vgId,
+                   "dispatch-monitor");
+  }
}

static int32_t doAddDispatchBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock,

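Unlike the check-rsp monitor, the dispatch monitor re-arms itself through `streamStartMonitorDispatchData`, which allocates a fresh id cell per arming; `cleanupInMonitor` then balances exactly one `taosReleaseRef` against the `taosAcquireRef` done when the timer fired and frees the cell that carried the id in. The loop below imitates three firings of that cycle with stand-in functions, just to show that the bookkeeping nets out to zero:

```c
// Toy acquire/release accounting across repeated timer firings.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int liveRefs = 0;  // stand-in for the pool's per-task reference count

static int  acquire(int64_t rid) { (void)rid; liveRefs++; return 0; }
static void release(int64_t rid) { (void)rid; liveRefs--; }

static void cleanupInMonitor(int64_t rid, void* param) {
  release(rid);  // undo the acquire done when the timer fired
  free(param);   // the cell that carried rid into the callback
}

int main(void) {
  int64_t rid = 0x7a;  // hypothetical ref id
  for (int firing = 0; firing < 3; firing++) {
    int64_t* cell = malloc(sizeof(int64_t));  // fresh cell per arming
    *cell = rid;
    if (acquire(*cell) == 0) {
      // ... retry failed dispatches, possibly re-arm the timer ...
      cleanupInMonitor(*cell, cell);
    }
  }
  printf("outstanding refs after 3 firings: %d (balanced when 0)\n", liveRefs);
  return 0;
}
```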
@@ -854,9 +881,9 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
  } else {
    streamMutexLock(&pTask->msgInfo.lock);
    if (pTask->msgInfo.inMonitor == 0) {
-      int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
-      stDebug("s-task:%s start dispatch monitor tmr in %dms, ref:%d, dispatch code:%s", id, DISPATCH_RETRY_INTERVAL_MS,
-              ref, tstrerror(code));
+      // int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
+      stDebug("s-task:%s start dispatch monitor tmr in %dms, dispatch code:%s", id, DISPATCH_RETRY_INTERVAL_MS,
+              tstrerror(code));
      streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS);
      pTask->msgInfo.inMonitor = 1;
    } else {

|
@ -911,31 +938,31 @@ int32_t initCheckpointReadyMsg(SStreamTask* pTask, int32_t upstreamNodeId, int32
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t doTaskChkptStatusCheck(SStreamTask* pTask, int32_t num) {
|
static int32_t doTaskChkptStatusCheck(SStreamTask* pTask, void* param, int32_t num) {
|
||||||
SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
|
SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
|
||||||
SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr;
|
SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr;
|
||||||
const char* id = pTask->id.idStr;
|
const char* id = pTask->id.idStr;
|
||||||
int32_t vgId = pTask->pMeta->vgId;
|
int32_t vgId = pTask->pMeta->vgId;
|
||||||
|
|
||||||
if (pTmrInfo->launchChkptId != pActiveInfo->activeId) {
|
if (pTmrInfo->launchChkptId != pActiveInfo->activeId) {
|
||||||
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
|
streamCleanBeforeQuitTmr(pTmrInfo, param);
|
||||||
stWarn("s-task:%s vgId:%d ready-msg send tmr launched by previous checkpoint procedure, checkpointId:%" PRId64
|
stWarn("s-task:%s vgId:%d ready-msg send tmr launched by previous checkpoint procedure, checkpointId:%" PRId64
|
||||||
", quit, ref:%d",
|
", quit",
|
||||||
id, vgId, pTmrInfo->launchChkptId, ref);
|
id, vgId, pTmrInfo->launchChkptId);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// active checkpoint info is cleared for now
|
// active checkpoint info is cleared for now
|
||||||
if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (num == 0) || (pTask->chkInfo.startTs == 0)) {
|
if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (num == 0) || (pTask->chkInfo.startTs == 0)) {
|
||||||
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
|
streamCleanBeforeQuitTmr(pTmrInfo, param);
|
||||||
stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from readyMsg send tmr, ref:%d", id, vgId, ref);
|
stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from readyMsg send tmr", id, vgId);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (taosArrayGetSize(pTask->upstreamInfo.pList) != num) {
|
if (taosArrayGetSize(pTask->upstreamInfo.pList) != num) {
|
||||||
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
|
streamCleanBeforeQuitTmr(pTmrInfo, param);
|
||||||
stWarn("s-task:%s vgId:%d upstream number:%d not equals sent readyMsg:%d, quit from readyMsg send tmr, ref:%d", id,
|
stWarn("s-task:%s vgId:%d upstream number:%d not equals sent readyMsg:%d, quit from readyMsg send tmr", id,
|
||||||
vgId, (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList), num, ref);
|
vgId, (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList), num);
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1011,7 +1038,7 @@ static void doSendChkptReadyMsg(SStreamTask* pTask, SArray* pNotRspList, int64_t
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, SArray* pNotRspList) {
|
static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, void* param, SArray* pNotRspList) {
|
||||||
SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
|
SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
|
||||||
SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr;
|
SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr;
|
||||||
SArray* pList = pActiveInfo->pReadyMsgList;
|
SArray* pList = pActiveInfo->pReadyMsgList;
|
||||||
|
@ -1021,16 +1048,15 @@ static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, SArray* pNotRspList)
|
||||||
const char* id = pTask->id.idStr;
|
const char* id = pTask->id.idStr;
|
||||||
int32_t notRsp = 0;
|
int32_t notRsp = 0;
|
||||||
|
|
||||||
int32_t code = doTaskChkptStatusCheck(pTask, num);
|
int32_t code = doTaskChkptStatusCheck(pTask, param, num);
|
||||||
if (code) {
|
if (code) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
code = doFindNotConfirmUpstream(&pNotRspList, pList, num, vgId, pTask->info.taskLevel, id);
|
code = doFindNotConfirmUpstream(&pNotRspList, pList, num, vgId, pTask->info.taskLevel, id);
|
||||||
if (code) {
|
if (code) {
|
||||||
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
|
streamCleanBeforeQuitTmr(pTmrInfo, param);
|
||||||
stError("s-task:%s failed to find not rsp checkpoint-ready downstream, code:%s, out of tmr, ref:%d", id,
|
stError("s-task:%s failed to find not rsp checkpoint-ready downstream, code:%s, out of tmr", id, tstrerror(code));
|
||||||
tstrerror(code), ref);
|
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1045,26 +1071,41 @@ static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, SArray* pNotRspList)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) {
|
static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) {
|
||||||
SStreamTask* pTask = param;
|
|
||||||
int32_t vgId = pTask->pMeta->vgId;
|
|
||||||
const char* id = pTask->id.idStr;
|
|
||||||
SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo;
|
|
||||||
SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr;
|
|
||||||
SArray* pNotRspList = NULL;
|
SArray* pNotRspList = NULL;
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
int32_t notRsp = 0;
|
int32_t notRsp = 0;
|
||||||
|
int64_t taskRefId = *(int64_t*)param;
|
||||||
|
int32_t vgId = -1;
|
||||||
|
const char* id = NULL;
|
||||||
|
SActiveCheckpointInfo* pActiveInfo = NULL;
|
||||||
|
SStreamTmrInfo* pTmrInfo = NULL;
|
||||||
|
|
||||||
|
SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId);
|
||||||
|
if (pTask == NULL) {
|
||||||
|
stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__);
|
||||||
|
streamTaskFreeRefId(param);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
vgId = pTask->pMeta->vgId;
|
||||||
|
id = pTask->id.idStr;
|
||||||
|
pActiveInfo = pTask->chkInfo.pActiveInfo;
|
||||||
|
pTmrInfo = &pActiveInfo->chkptReadyMsgTmr;
|
||||||
|
|
||||||
// check the status every 100ms
|
// check the status every 100ms
|
||||||
if (streamTaskShouldStop(pTask)) {
|
if (streamTaskShouldStop(pTask)) {
|
||||||
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
|
streamCleanBeforeQuitTmr(pTmrInfo, param);
|
||||||
stDebug("s-task:%s vgId:%d status:stop, quit from monitor checkpoint-trigger, ref:%d", id, vgId, ref);
|
stDebug("s-task:%s vgId:%d status:stop, quit from monitor checkpoint-trigger", id, vgId);
|
||||||
streamMetaReleaseTask(pTask->pMeta, pTask);
|
streamMetaReleaseTask(pTask->pMeta, pTask);
|
||||||
|
taosArrayDestroy(pNotRspList);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (++pTmrInfo->activeCounter < 50) {
|
if (++pTmrInfo->activeCounter < 50) {
|
||||||
streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
|
streamTmrStart(chkptReadyMsgSendMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId,
|
||||||
"chkpt-ready-monitor");
|
"chkpt-ready-monitor");
|
||||||
|
streamMetaReleaseTask(pTask->pMeta, pTask);
|
||||||
|
taosArrayDestroy(pNotRspList);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1078,15 +1119,16 @@ static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) {
|
||||||
|
|
||||||
// 1. check status in the first place
|
// 1. check status in the first place
|
||||||
if (state.state != TASK_STATUS__CK) {
|
if (state.state != TASK_STATUS__CK) {
|
||||||
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
|
streamCleanBeforeQuitTmr(pTmrInfo, param);
|
||||||
stDebug("s-task:%s vgId:%d status:%s not in checkpoint, quit from monitor checkpoint-ready, ref:%d", id, vgId,
|
stDebug("s-task:%s vgId:%d status:%s not in checkpoint, quit from monitor checkpoint-ready", id, vgId,
|
||||||
state.name, ref);
|
state.name);
|
||||||
streamMetaReleaseTask(pTask->pMeta, pTask);
|
streamMetaReleaseTask(pTask->pMeta, pTask);
|
||||||
|
taosArrayDestroy(pNotRspList);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
streamMutexLock(&pActiveInfo->lock);
|
streamMutexLock(&pActiveInfo->lock);
|
||||||
code = chkptReadyMsgSendHelper(pTask, pNotRspList);
|
code = chkptReadyMsgSendHelper(pTask, param, pNotRspList);
|
||||||
streamMutexUnlock(&pActiveInfo->lock);
|
streamMutexUnlock(&pActiveInfo->lock);
|
||||||
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
@ -1098,18 +1140,18 @@ static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) {
|
||||||
notRsp = taosArrayGetSize(pNotRspList);
|
notRsp = taosArrayGetSize(pNotRspList);
|
||||||
if (notRsp > 0) { // send checkpoint-ready msg again
|
if (notRsp > 0) { // send checkpoint-ready msg again
|
||||||
stDebug("s-task:%s start to monitor checkpoint-ready msg recv status in 10s", id);
|
stDebug("s-task:%s start to monitor checkpoint-ready msg recv status in 10s", id);
|
||||||
streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
|
streamTmrStart(chkptReadyMsgSendMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId,
|
||||||
"chkpt-ready-monitor");
|
"chkpt-ready-monitor");
|
||||||
} else {
|
} else {
|
||||||
int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask);
|
streamCleanBeforeQuitTmr(pTmrInfo, param);
|
||||||
stDebug(
|
stDebug(
|
||||||
"s-task:%s vgId:%d checkpoint-ready msg confirmed by all upstream task(s), clear checkpoint-ready msg and quit "
|
"s-task:%s vgId:%d checkpoint-ready msg confirmed by all upstream task(s), clear checkpoint-ready msg and quit "
|
||||||
"from timer, ref:%d",
|
"from timer",
|
||||||
id, vgId, ref);
|
id, vgId);
|
||||||
// release should be the last execution, since pTask may be destroy after it immidiately.
|
|
||||||
streamMetaReleaseTask(pTask->pMeta, pTask);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// release should be the last execution, since pTask may be destroyed after it immediately.
|
||||||
|
streamMetaReleaseTask(pTask->pMeta, pTask);
|
||||||
taosArrayDestroy(pNotRspList);
|
taosArrayDestroy(pNotRspList);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1160,15 +1202,17 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) {
|
||||||
|
|
||||||
int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1);
|
int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1);
|
||||||
if (old == 0) {
|
if (old == 0) {
|
||||||
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
|
stDebug("s-task:%s start checkpoint-ready monitor in 10s", pTask->id.idStr);
|
||||||
stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref);
|
|
||||||
int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
|
|
||||||
|
|
||||||
streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
|
int64_t* pTaskRefId = NULL;
|
||||||
"chkpt-ready-monitor");
|
int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId);
|
||||||
|
if (code == 0) {
|
||||||
|
streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTaskRefId, streamTimer, &pTmrInfo->tmrHandle, vgId,
|
||||||
|
"chkpt-ready-monitor");
|
||||||
|
|
||||||
// mark the timer monitor checkpointId
|
// mark the timer monitor checkpointId
|
||||||
pTmrInfo->launchChkptId = pActiveInfo->activeId;
|
pTmrInfo->launchChkptId = pActiveInfo->activeId;
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
stError("s-task:%s previous checkpoint-ready monitor tmr is set, not start new one", pTask->id.idStr);
|
stError("s-task:%s previous checkpoint-ready monitor tmr is set, not start new one", pTask->id.idStr);
|
||||||
}
|
}
|
||||||
|
|
|
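Note: the two hunks above switch the checkpoint-ready monitor timer from carrying a raw `SStreamTask*` to carrying a heap-allocated refId obtained via `streamTaskAllocRefId`, so a timer that fires after the task is dropped can no longer touch freed memory. A minimal standalone sketch of that ownership hand-off (the `alloc_ref_id`, `fire_timer`, and `monitor_cb` names are illustrative stand-ins, not the TDengine API):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef void (*tmr_fn)(void *param);

/* model of streamTaskAllocRefId: the timer gets its own owned copy of the refId */
static int64_t *alloc_ref_id(int64_t refId) {
  int64_t *p = malloc(sizeof(*p));
  if (p != NULL) *p = refId;
  return p;
}

/* model of a one-shot timer: it simply invokes the callback */
static void fire_timer(tmr_fn fn, void *param) { fn(param); }

static void monitor_cb(void *param) {
  int64_t refId = *(int64_t *)param;
  /* a real callback resolves refId -> task via the ref pool and quits
   * cleanly when the task is already gone, instead of dereferencing it */
  printf("monitor resolved refId:%" PRId64 "\n", refId);
  free(param); /* the callback owns the param and frees it when the chain ends */
}

int main(void) {
  int64_t *p = alloc_ref_id(42);
  if (p != NULL) fire_timer(monitor_cb, p);
  return 0;
}
```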
@@ -21,7 +21,7 @@
 #include "ttimer.h"
 #include "wal.h"

-int32_t streamMetaId = 0;
+int32_t streamMetaRefPool = 0;

 struct SMetaHbInfo {
   tmr_h hbTmr;
@@ -123,17 +123,21 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) {

  for(int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pId = taosArrayGet(pMeta->pTaskList, i);
    STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId};
-   SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-   if (pTask == NULL) {
+   SStreamTask* pTask = NULL;
+   code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
+   if (code != 0) {
      continue;
    }

-   if ((*pTask)->info.fillHistory == 1) {
+   if (pTask->info.fillHistory == 1) {
+     streamMetaReleaseTask(pMeta, pTask);
      continue;
    }

-   epsetAssign(&epset, &(*pTask)->info.mnodeEpset);
+   epsetAssign(&epset, &pTask->info.mnodeEpset);
+   streamMetaReleaseTask(pMeta, pTask);
    break;
  }

@@ -159,28 +163,30 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) {
  for (int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pId = taosArrayGet(pMeta->pTaskList, i);

    STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId};
-   SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-   if (pTask == NULL) {
+   SStreamTask* pTask = NULL;
+   code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
+   if (code != 0) {
      continue;
    }

    // not report the status of fill-history task
-   if ((*pTask)->info.fillHistory == 1) {
+   if (pTask->info.fillHistory == 1) {
+     streamMetaReleaseTask(pMeta, pTask);
      continue;
    }

-   streamMutexLock(&(*pTask)->lock);
-   STaskStatusEntry entry = streamTaskGetStatusEntry(*pTask);
-   streamMutexUnlock(&(*pTask)->lock);
+   streamMutexLock(&pTask->lock);
+   STaskStatusEntry entry = streamTaskGetStatusEntry(pTask);
+   streamMutexUnlock(&pTask->lock);

    entry.inputRate = entry.inputQUsed * 100.0 / (2 * STREAM_TASK_QUEUE_CAPACITY_IN_SIZE);
-   if ((*pTask)->info.taskLevel == TASK_LEVEL__SINK) {
-     entry.sinkQuota = (*pTask)->outputInfo.pTokenBucket->quotaRate;
-     entry.sinkDataSize = SIZE_IN_MiB((*pTask)->execInfo.sink.dataSize);
+   if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
+     entry.sinkQuota = pTask->outputInfo.pTokenBucket->quotaRate;
+     entry.sinkDataSize = SIZE_IN_MiB(pTask->execInfo.sink.dataSize);
    }

-   SActiveCheckpointInfo* p = (*pTask)->chkInfo.pActiveInfo;
+   SActiveCheckpointInfo* p = pTask->chkInfo.pActiveInfo;
    if (p->activeId != 0) {
      entry.checkpointInfo.failed = (p->failedId >= p->activeId) ? 1 : 0;
      entry.checkpointInfo.activeId = p->activeId;
@@ -188,40 +194,42 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) {

      if (entry.checkpointInfo.failed) {
        stInfo("s-task:%s set kill checkpoint trans in hbMsg, transId:%d, clear the active checkpointInfo",
-              (*pTask)->id.idStr, p->transId);
+              pTask->id.idStr, p->transId);

-       streamMutexLock(&(*pTask)->lock);
-       streamTaskClearCheckInfo((*pTask), true);
-       streamMutexUnlock(&(*pTask)->lock);
+       streamMutexLock(&pTask->lock);
+       streamTaskClearCheckInfo(pTask, true);
+       streamMutexUnlock(&pTask->lock);
      }
    }

-   streamMutexLock(&(*pTask)->lock);
-   entry.checkpointInfo.consensusChkptId = streamTaskCheckIfReqConsenChkptId(*pTask, pMsg->ts);
+   streamMutexLock(&pTask->lock);
+   entry.checkpointInfo.consensusChkptId = streamTaskCheckIfReqConsenChkptId(pTask, pMsg->ts);
    if (entry.checkpointInfo.consensusChkptId) {
      entry.checkpointInfo.consensusTs = pMsg->ts;
    }
-   streamMutexUnlock(&(*pTask)->lock);
+   streamMutexUnlock(&pTask->lock);

-   if ((*pTask)->exec.pWalReader != NULL) {
-     entry.processedVer = walReaderGetCurrentVer((*pTask)->exec.pWalReader) - 1;
+   if (pTask->exec.pWalReader != NULL) {
+     entry.processedVer = walReaderGetCurrentVer(pTask->exec.pWalReader) - 1;
      if (entry.processedVer < 0) {
-       entry.processedVer = (*pTask)->chkInfo.processedVer;
+       entry.processedVer = pTask->chkInfo.processedVer;
      }

-     walReaderValidVersionRange((*pTask)->exec.pWalReader, &entry.verRange.minVer, &entry.verRange.maxVer);
+     walReaderValidVersionRange(pTask->exec.pWalReader, &entry.verRange.minVer, &entry.verRange.maxVer);
    }

-   addUpdateNodeIntoHbMsg(*pTask, pMsg);
+   addUpdateNodeIntoHbMsg(pTask, pMsg);
    p = taosArrayPush(pMsg->pTaskStatus, &entry);
    if (p == NULL) {
-     stError("failed to add taskInfo:0x%x in hbMsg, vgId:%d", (*pTask)->id.taskId, pMeta->vgId);
+     stError("failed to add taskInfo:0x%x in hbMsg, vgId:%d", pTask->id.taskId, pMeta->vgId);
    }

    if (!hasMnodeEpset) {
-     epsetAssign(&epset, &(*pTask)->info.mnodeEpset);
+     epsetAssign(&epset, &pTask->info.mnodeEpset);
      hasMnodeEpset = true;
    }

+   streamMetaReleaseTask(pMeta, pTask);
  }

  pMsg->numOfTasks = taosArrayGetSize(pMsg->pTaskStatus);
@@ -244,9 +252,10 @@ void streamMetaHbToMnode(void* param, void* tmrId) {
  int32_t vgId = 0;
  int32_t role = 0;

- SStreamMeta* pMeta = taosAcquireRef(streamMetaId, rid);
+ SStreamMeta* pMeta = taosAcquireRef(streamMetaRefPool, rid);
  if (pMeta == NULL) {
-   stError("invalid rid:%" PRId64 " failed to acquired stream-meta", rid);
+   stError("invalid meta rid:%" PRId64 " failed to acquired stream-meta", rid);
+   // taosMemoryFree(param);
    return;
  }

@@ -256,24 +265,26 @@ void streamMetaHbToMnode(void* param, void* tmrId) {
  // need to stop, stop now
  if (pMeta->closeFlag) {
    pMeta->pHbInfo->hbStart = 0;
-   code = taosReleaseRef(streamMetaId, rid);
+   code = taosReleaseRef(streamMetaRefPool, rid);
    if (code == TSDB_CODE_SUCCESS) {
      stDebug("vgId:%d jump out of meta timer", vgId);
    } else {
      stError("vgId:%d jump out of meta timer, failed to release the meta rid:%" PRId64, vgId, rid);
    }
+   // taosMemoryFree(param);
    return;
  }

  // not leader not send msg
  if (pMeta->role != NODE_ROLE_LEADER) {
    pMeta->pHbInfo->hbStart = 0;
-   code = taosReleaseRef(streamMetaId, rid);
+   code = taosReleaseRef(streamMetaRefPool, rid);
    if (code == TSDB_CODE_SUCCESS) {
      stInfo("vgId:%d role:%d not leader not send hb to mnode", vgId, role);
    } else {
      stError("vgId:%d role:%d not leader not send hb to mnodefailed to release the meta rid:%" PRId64, vgId, role, rid);
    }
+   // taosMemoryFree(param);
    return;
  }

@@ -281,7 +292,7 @@ void streamMetaHbToMnode(void* param, void* tmrId) {
  streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr, vgId,
                 "meta-hb-tmr");

- code = taosReleaseRef(streamMetaId, rid);
+ code = taosReleaseRef(streamMetaRefPool, rid);
  if (code) {
    stError("vgId:%d in meta timer, failed to release the meta rid:%" PRId64, vgId, rid);
  }
@@ -298,12 +309,13 @@ void streamMetaHbToMnode(void* param, void* tmrId) {
  if (code) {
    stError("vgId:%d failed to send hmMsg to mnode, try again in 5s, code:%s", pMeta->vgId, tstrerror(code));
  }

  streamMetaRUnLock(pMeta);

  streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr, pMeta->vgId,
                 "meta-hb-tmr");

- code = taosReleaseRef(streamMetaId, rid);
+ code = taosReleaseRef(streamMetaRefPool, rid);
  if (code) {
    stError("vgId:%d in meta timer, failed to release the meta rid:%" PRId64, vgId, rid);
  }
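Note: with `pTasksMap` now storing refIds instead of `SStreamTask*` pointers, every loop in the heartbeat path acquires the task, dereferences it directly (`pTask->` rather than `(*pTask)->`), and must pair the acquire with a release on every exit path; hence the added `streamMetaReleaseTask` before each `continue`. A standalone sketch of that loop shape (the `Task`/`acquire`/`release` model is illustrative only, not the real acquire path):

```c
#include <stddef.h>
#include <stdio.h>

/* standalone model: a task plus a tiny acquire/release pair guarding it */
typedef struct { int fillHistory; int users; } Task;

static Task *acquire(Task *t) { if (t) t->users++; return t; }
static void  release(Task *t) { if (t) t->users--; }

static void hb_loop(Task **tasks, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    Task *t = acquire(tasks[i]);
    if (t == NULL) continue;   /* task already dropped: nothing to release */
    if (t->fillHistory == 1) { /* every early exit must release first */
      release(t);
      continue;
    }
    printf("report task %zu\n", i);
    release(t);                /* pair each successful acquire with one release */
  }
}

int main(void) {
  Task a = {0, 0}, b = {1, 0};
  Task *list[] = {&a, &b, NULL};
  hb_loop(list, 3);
  return 0;
}
```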
@@ -13,7 +13,6 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

-#include "executor.h"
 #include "streamBackendRocksdb.h"
 #include "streamInt.h"
 #include "tmisce.h"
@@ -28,6 +27,7 @@ static TdThreadOnce streamMetaModuleInit = PTHREAD_ONCE_INIT;
 int32_t streamBackendId = 0;
 int32_t streamBackendCfWrapperId = 0;
 int32_t taskDbWrapperId = 0;
+int32_t streamTaskRefPool = 0;

 static int32_t streamMetaBegin(SStreamMeta* pMeta);
 static void    streamMetaCloseImpl(void* arg);
@@ -41,14 +41,14 @@ SMetaRefMgt gMetaRefMgt;

 int32_t metaRefMgtInit();
 void    metaRefMgtCleanup();
-int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid);

 static void streamMetaEnvInit() {
   streamBackendId = taosOpenRef(64, streamBackendCleanup);
   streamBackendCfWrapperId = taosOpenRef(64, streamBackendHandleCleanup);
   taskDbWrapperId = taosOpenRef(64, taskDbDestroy2);

-  streamMetaId = taosOpenRef(64, streamMetaCloseImpl);
+  streamMetaRefPool = taosOpenRef(64, streamMetaCloseImpl);
+  streamTaskRefPool = taosOpenRef(64, tFreeStreamTask);

   int32_t code = metaRefMgtInit();
   if (code) {
@@ -72,7 +72,8 @@ void streamMetaInit() {
 void streamMetaCleanup() {
   taosCloseRef(streamBackendId);
   taosCloseRef(streamBackendCfWrapperId);
-  taosCloseRef(streamMetaId);
+  taosCloseRef(streamMetaRefPool);
+  taosCloseRef(streamTaskRefPool);

   metaRefMgtCleanup();
   streamTimerCleanUp();
@@ -98,16 +99,12 @@ int32_t metaRefMgtInit() {
 void metaRefMgtCleanup() {
   void* pIter = taosHashIterate(gMetaRefMgt.pTable, NULL);
   while (pIter) {
-    SArray* list = *(SArray**)pIter;
-    for (int i = 0; i < taosArrayGetSize(list); i++) {
-      void* rid = taosArrayGetP(list, i);
-      taosMemoryFree(rid);
-    }
-    taosArrayDestroy(list);
+    int64_t* p = *(int64_t**) pIter;
+    taosMemoryFree(p);

     pIter = taosHashIterate(gMetaRefMgt.pTable, pIter);
   }
-  taosHashCleanup(gMetaRefMgt.pTable);

+  taosHashCleanup(gMetaRefMgt.pTable);
   streamMutexDestroy(&gMetaRefMgt.mutex);
 }

@@ -117,35 +114,32 @@ int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid) {

   streamMutexLock(&gMetaRefMgt.mutex);

-  p = taosHashGet(gMetaRefMgt.pTable, &vgId, sizeof(vgId));
+  p = taosHashGet(gMetaRefMgt.pTable, &rid, sizeof(rid));
   if (p == NULL) {
-    SArray* pList = taosArrayInit(8, POINTER_BYTES);
-    if (pList == NULL) {
-      return terrno;
-    }
-
-    p = taosArrayPush(pList, &rid);
-    if (p == NULL) {
-      return terrno;
-    }
-
-    code = taosHashPut(gMetaRefMgt.pTable, &vgId, sizeof(vgId), &pList, sizeof(void*));
+    code = taosHashPut(gMetaRefMgt.pTable, &rid, sizeof(rid), &rid, sizeof(void*));
     if (code) {
-      stError("vgId:%d failed to put into metaRef table, rid:%" PRId64, (int32_t)vgId, *rid);
+      stError("vgId:%d failed to put into refId mgt, refId:%" PRId64" %p, code:%s", (int32_t)vgId, *rid, rid,
+              tstrerror(code));
       return code;
+    } else { // not
+      // stInfo("add refId:%"PRId64" vgId:%d, %p", *rid, (int32_t)vgId, rid);
     }
   } else {
-    SArray* list = *(SArray**)p;
-    void* px = taosArrayPush(list, &rid);
-    if (px == NULL) {
-      code = terrno;
-    }
+    stFatal("try to add refId:%"PRId64" vgId:%d, %p that already added into mgt", *rid, (int32_t) vgId, rid);
   }

   streamMutexUnlock(&gMetaRefMgt.mutex);
   return code;
 }

+void metaRefMgtRemove(int64_t* pRefId) {
+  streamMutexLock(&gMetaRefMgt.mutex);
+
+  int32_t code = taosHashRemove(gMetaRefMgt.pTable, &pRefId, sizeof(pRefId));
+  taosMemoryFree(pRefId);
+  streamMutexUnlock(&gMetaRefMgt.mutex);
+}
+
 int32_t streamMetaOpenTdb(SStreamMeta* pMeta) {
   if (tdbOpen(pMeta->path, 16 * 1024, 1, &pMeta->db, 0, 0, NULL) < 0) {
     stError("vgId:%d open file:%s failed, stream meta open failed", pMeta->vgId, pMeta->path);
@@ -458,7 +452,7 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn,
   TSDB_CHECK_CODE(code, lino, _err);

   // add refId at the end of initialization function
-  pMeta->rid = taosAddRef(streamMetaId, pMeta);
+  pMeta->rid = taosAddRef(streamMetaRefPool, pMeta);

   int64_t* pRid = taosMemoryMalloc(sizeof(int64_t));
   TSDB_CHECK_NULL(pRid, code, lino, _err, terrno);
@@ -531,17 +525,28 @@ void streamMetaClear(SStreamMeta* pMeta) {
   // remove all existed tasks in this vnode
   void* pIter = NULL;
   while ((pIter = taosHashIterate(pMeta->pTasksMap, pIter)) != NULL) {
-    SStreamTask* p = *(SStreamTask**)pIter;
+    int64_t      refId = *(int64_t*)pIter;
+    SStreamTask* p = taosAcquireRef(streamTaskRefPool, refId);
+    if (p == NULL) {
+      continue;
+    }

     // release the ref by timer
     if (p->info.delaySchedParam != 0 && p->info.fillHistory == 0) {  // one more ref in timer
-      stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", p->id.idStr, p->refCnt);
+      stDebug("s-task:%s stop schedTimer", p->id.idStr);
       streamTmrStop(p->schedInfo.pDelayTimer);
       p->info.delaySchedParam = 0;
-      streamMetaReleaseTask(pMeta, p);
     }

-    streamMetaReleaseTask(pMeta, p);
+    int32_t code = taosRemoveRef(streamTaskRefPool, refId);
+    if (code) {
+      stError("vgId:%d remove task refId failed, refId:%" PRId64, pMeta->vgId, refId);
+    }
+
+    code = taosReleaseRef(streamTaskRefPool, refId);
+    if (code) {
+      stError("vgId:%d failed to release refId:%" PRId64, pMeta->vgId, refId);
+    }
   }

   if (pMeta->streamBackendRid != 0) {
@@ -571,9 +576,9 @@ void streamMetaClose(SStreamMeta* pMeta) {
   if (pMeta == NULL) {
     return;
   }
-  int32_t code = taosRemoveRef(streamMetaId, pMeta->rid);
+  int32_t code = taosRemoveRef(streamMetaRefPool, pMeta->rid);
   if (code) {
-    stError("vgId:%d failed to remove ref:%" PRId64 ", code:%s", pMeta->vgId, pMeta->rid, tstrerror(code));
+    stError("vgId:%d failed to remove meta ref:%" PRId64 ", code:%s", pMeta->vgId, pMeta->rid, tstrerror(code));
   }
 }

@@ -660,9 +665,16 @@ int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) {
   code = tdbTbUpsert(pMeta->pTaskDb, id, STREAM_TASK_KEY_LEN, buf, len, pMeta->txn);
   if (code != TSDB_CODE_SUCCESS) {
     code = terrno;
-    stError("s-task:%s vgId:%d task meta save to disk failed, code:%s", pTask->id.idStr, vgId, tstrerror(terrno));
+    stError("s-task:%s vgId:%d refId:%" PRId64 " task meta save to disk failed, remove ref, code:%s", pTask->id.idStr,
+            vgId, pTask->id.refId, tstrerror(code));
+
+    int64_t refId = pTask->id.refId;
+    int32_t ret = taosRemoveRef(streamTaskRefPool, pTask->id.refId);
+    if (ret != 0) {
+      stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id[1], refId);
+    }
   } else {
-    stDebug("s-task:%s vgId:%d task meta save to disk", pTask->id.idStr, vgId);
+    stDebug("s-task:%s vgId:%d refId:%" PRId64 " task meta save to disk", pTask->id.idStr, vgId, pTask->id.refId);
   }

   taosMemoryFree(buf);
@@ -687,34 +699,54 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa
   *pAdded = false;

   int32_t code = 0;
+  int64_t refId = 0;
   STaskId id = streamTaskGetTaskId(pTask);
   void*   p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));

   if (p != NULL) {
     stDebug("s-task:%" PRIx64 " already exist in meta, no need to register", id.taskId);
+    tFreeStreamTask(pTask);
     return code;
   }

   if ((code = pMeta->buildTaskFn(pMeta->ahandle, pTask, ver)) != 0) {
+    tFreeStreamTask(pTask);
     return code;
   }

   p = taosArrayPush(pMeta->pTaskList, &pTask->id);
   if (p == NULL) {
     stError("s-task:0x%" PRIx64 " failed to register task into meta-list, code: out of memory", id.taskId);
+    tFreeStreamTask(pTask);
     return terrno;
   }

-  code = taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask, POINTER_BYTES);
+  pTask->id.refId = refId = taosAddRef(streamTaskRefPool, pTask);
+  code = taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask->id.refId, sizeof(int64_t));
   if (code) {
     stError("s-task:0x%" PRIx64 " failed to register task into meta-list, code: out of memory", id.taskId);
+
+    int32_t ret = taosRemoveRef(streamTaskRefPool, refId);
+    if (ret != 0) {
+      stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id.taskId, refId);
+    }
     return code;
   }

   if ((code = streamMetaSaveTask(pMeta, pTask)) != 0) {
+    int32_t ret = taosRemoveRef(streamTaskRefPool, refId);
+    if (ret) {
+      stError("vgId:%d remove task refId failed, refId:%" PRId64, pMeta->vgId, refId);
+    }
     return code;
   }

   if ((code = streamMetaCommit(pMeta)) != 0) {
+    int32_t ret = taosRemoveRef(streamTaskRefPool, refId);
+    if (ret) {
+      stError("vgId:%d remove task refId failed, refId:%" PRId64, pMeta->vgId, refId);
+    }
+
     return code;
   }

@@ -722,6 +754,9 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa
     int32_t val = atomic_add_fetch_32(&pMeta->numOfStreamTasks, 1);
   }

+  // enable the scheduler for stream tasks
+  streamSetupScheduleTrigger(pTask);
+
   *pAdded = true;
   return code;
 }
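Note: once `taosAddRef` has handed the task to `streamTaskRefPool`, every later failure in the registration path (`taosHashPut`, `streamMetaSaveTask`, `streamMetaCommit`) must call `taosRemoveRef`, or the task would stay alive in the pool. A toy sketch of that unwind rule (the `pool_add`/`pool_remove` stand-ins are hypothetical):

```c
#include <stdio.h>

/* model of the register flow above: once the task is in the pool, every
 * failure path must remove the ref again or the task leaks in the pool */
static int  pool_add(void)    { printf("add ref\n");    return 0; }
static void pool_remove(void) { printf("remove ref\n"); }

static int register_task(int save_fails, int commit_fails) {
  if (pool_add() != 0) return -1;
  if (save_fails)   { pool_remove(); return -1; } /* save step failed */
  if (commit_fails) { pool_remove(); return -1; } /* commit step failed */
  return 0;
}

int main(void) {
  printf("ok:     %d\n", register_task(0, 0));
  printf("unwind: %d\n", register_task(1, 0));
  return 0;
}
```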
@@ -737,16 +772,72 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta) {
 }

 int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask) {
-  STaskId       id = {.streamId = streamId, .taskId = taskId};
-  SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-  if (ppTask == NULL || streamTaskShouldStop(*ppTask)) {
-    *pTask = NULL;
+  QRY_PARAM_CHECK(pTask);
+  STaskId  id = {.streamId = streamId, .taskId = taskId};
+  int64_t* pTaskRefId = (int64_t*)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
+  if (pTaskRefId == NULL) {
     return TSDB_CODE_STREAM_TASK_NOT_EXIST;
   }

-  int32_t ref = atomic_add_fetch_32(&(*ppTask)->refCnt, 1);
-  stTrace("s-task:%s acquire task, ref:%d", (*ppTask)->id.idStr, ref);
-  *pTask = *ppTask;
+  SStreamTask* p = taosAcquireRef(streamTaskRefPool, *pTaskRefId);
+  if (p == NULL) {
+    stDebug("s-task:%x failed to acquire task refId:%"PRId64", may have been destoried", taskId, *pTaskRefId);
+    return TSDB_CODE_STREAM_TASK_NOT_EXIST;
+  }
+
+  if (p->id.refId != *pTaskRefId) {
+    stFatal("s-task:%x inconsistent refId, task refId:%" PRId64 " try acquire:%" PRId64, taskId, *pTaskRefId,
+            p->id.refId);
+    int32_t ret = taosReleaseRef(streamTaskRefPool, *pTaskRefId);
+    if (ret) {
+      stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, *pTaskRefId);
+    }
+
+    return TSDB_CODE_STREAM_TASK_NOT_EXIST;
+  }
+
+  if (streamTaskShouldStop(p)) {
+    stDebug("s-task:%s is stopped, failed to acquire it now", p->id.idStr);
+    int32_t ret = taosReleaseRef(streamTaskRefPool, *pTaskRefId);
+    if (ret) {
+      stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, *pTaskRefId);
+    }
+    return TSDB_CODE_STREAM_TASK_NOT_EXIST;
+  }
+
+  stDebug("s-task:%s acquire task, refId:%" PRId64, p->id.idStr, p->id.refId);
+  *pTask = p;
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t streamMetaAcquireTaskUnsafe(SStreamMeta* pMeta, STaskId* pId, SStreamTask** pTask) {
+  QRY_PARAM_CHECK(pTask);
+  int64_t* pTaskRefId = (int64_t*)taosHashGet(pMeta->pTasksMap, pId, sizeof(*pId));
+
+  if (pTaskRefId == NULL) {
+    return TSDB_CODE_STREAM_TASK_NOT_EXIST;
+  }
+
+  SStreamTask* p = taosAcquireRef(streamTaskRefPool, *pTaskRefId);
+  if (p == NULL) {
+    stDebug("s-task:%" PRIx64 " failed to acquire task refId:%" PRId64 ", may have been destoried", pId->taskId,
+            *pTaskRefId);
+    return TSDB_CODE_STREAM_TASK_NOT_EXIST;
+  }
+
+  if (p->id.refId != *pTaskRefId) {
+    stFatal("s-task:%" PRIx64 " inconsistent refId, task refId:%" PRId64 " try acquire:%" PRId64, pId->taskId,
+            *pTaskRefId, p->id.refId);
+    int32_t ret = taosReleaseRef(streamTaskRefPool, *pTaskRefId);
+    if (ret) {
+      stError("s-task:0x%" PRIx64 " failed to release task refId:%" PRId64, pId->taskId, *pTaskRefId);
+    }
+
+    return TSDB_CODE_STREAM_TASK_NOT_EXIST;
+  }
+
+  stDebug("s-task:%s acquire task, refId:%" PRId64, p->id.idStr, p->id.refId);
+  *pTask = p;
   return TSDB_CODE_SUCCESS;
 }

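Note: both acquire variants above defend against refId slot reuse by checking that the refId recorded inside the task matches the refId found in the map before handing the task out. A sketch of just that check (an illustrative single-slot model, not the real pool):

```c
#include <stdint.h>
#include <stdio.h>

/* model: the map stores refIds; the pool maps a refId back to a task slot */
typedef struct { int64_t refId; const char *id; } Task;

static Task *pool_acquire(Task *slot, int64_t refId) {
  /* a slot can be recycled for a new task; only hand it out when the
   * refId stored in the slot still agrees with the one being acquired */
  if (slot == NULL || slot->refId != refId) return NULL;
  return slot;
}

int main(void) {
  Task t = {.refId = 7, .id = "task-1"};
  printf("acquire ok: %d\n", pool_acquire(&t, 7) != NULL); /* prints 1 */
  printf("stale ref : %d\n", pool_acquire(&t, 6) != NULL); /* prints 0 */
  return 0;
}
```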
@@ -757,28 +848,17 @@ int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t task
   return code;
 }

-int32_t streamMetaAcquireOneTask(SStreamTask* pTask) {
-  int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
-  stTrace("s-task:%s acquire task, ref:%d", pTask->id.idStr, ref);
-  return ref;
-}
-
 void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) {
   if (pTask == NULL) {
     return;
   }

   int32_t taskId = pTask->id.taskId;
-  int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1);
-  // not safe to use the pTask->id.idStr, since pTask may be released by other threads when print logs.
-  if (ref > 0) {
-    stTrace("s-task:0x%x release task, ref:%d", taskId, ref);
-  } else if (ref == 0) {
-    stTrace("s-task:0x%x all refs are gone, free it", taskId);
-    tFreeStreamTask(pTask);
-  } else if (ref < 0) {
-    stError("task ref is invalid, ref:%d, 0x%x", ref, taskId);
+  int64_t refId = pTask->id.refId;
+  stDebug("s-task:0x%x release task, refId:%" PRId64, taskId, pTask->id.refId);
+  int32_t ret = taosReleaseRef(streamTaskRefPool, pTask->id.refId);
+  if (ret) {
+    stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, refId);
   }
 }

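Note: `streamMetaReleaseTask` no longer decrements a manual `refCnt` and frees at zero; it delegates to `taosReleaseRef`, and the pool runs the `tFreeStreamTask` cleanup registered at `taosOpenRef` time once the entry has been removed and the last reference is gone. A single-entry model of those semantics (behavior inferred from this diff, not the actual taosRef implementation):

```c
#include <stdio.h>
#include <stdlib.h>

/* tiny single-entry model of a ref pool: remove() marks the entry dead,
 * and whichever release() drops the count to zero runs the cleanup fn */
typedef struct {
  void *obj;
  int   count;
  int   removed;
  void (*cleanup)(void *);
} RefEntry;

static void ref_release(RefEntry *e) {
  if (--e->count == 0 && e->removed) {
    e->cleanup(e->obj); /* last reference gone: free via registered cleanup */
    e->obj = NULL;
  }
}

static void ref_remove(RefEntry *e) {
  e->removed = 1; /* no new acquires succeed after this */
  ref_release(e); /* drop the reference taken when the entry was added */
}

static void free_task(void *p) { printf("cleanup %p\n", p); free(p); }

int main(void) {
  /* count 2 = one ref from add + one outstanding acquire */
  RefEntry e = {.obj = malloc(16), .count = 2, .removed = 0, .cleanup = free_task};
  ref_remove(&e);  /* unregister: count 2 -> 1, entry marked dead */
  ref_release(&e); /* last user releases: count 1 -> 0, cleanup runs */
  return 0;
}
```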
@@ -816,13 +896,10 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
   int32_t code = 0;
   STaskId id = {.streamId = streamId, .taskId = taskId};

-  // pre-delete operation
   streamMetaWLock(pMeta);

-  SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-  if (ppTask) {
-    pTask = *ppTask;
-
+  code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
+  if (code == 0) {
     // desc the paused task counter
     if (streamTaskShouldPause(pTask)) {
       int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1);
@@ -834,43 +911,9 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
     if (code) {
       stError("s-task:0x%" PRIx64 " failed to handle dropping event async, code:%s", id.taskId, tstrerror(code));
     }
-  } else {
-    stDebug("vgId:%d failed to find the task:0x%x, it may be dropped already", vgId, taskId);
-    streamMetaWUnLock(pMeta);
-    return 0;
-  }
-
-  streamMetaWUnLock(pMeta);
-
-  stDebug("s-task:0x%x vgId:%d set task status:dropping and start to unregister it", taskId, vgId);
-
-  while (1) {
-    int32_t timerActive = 0;
-
-    streamMetaRLock(pMeta);
-    ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-    if (ppTask) {
-      // to make sure check status will not start the check downstream status when we start to check timerActive count.
-      streamMutexLock(&pTask->taskCheckInfo.checkInfoLock);
-      timerActive = (*ppTask)->status.timerActive;
-      streamMutexUnlock(&pTask->taskCheckInfo.checkInfoLock);
-    }
-    streamMetaRUnLock(pMeta);
-
-    if (timerActive > 0) {
-      taosMsleep(100);
-      stDebug("s-task:0x%" PRIx64 " wait for quit from timer", id.taskId);
-    } else {
-      break;
-    }
-  }
-
-  // let's do delete of stream task
-  streamMetaWLock(pMeta);
-
-  ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-  if (ppTask) {
-    pTask = *ppTask;
+    stDebug("s-task:0x%x vgId:%d set task status:dropping and start to unregister it", taskId, vgId);
+
     // it is a fill-history task, remove the related stream task's id that points to it
     if (pTask->info.fillHistory == 0) {
       int32_t ret = atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1);
@@ -888,21 +931,22 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
     if (sizeInList != size) {
       stError("vgId:%d tasks number not consistent in list:%d and map:%d, ", vgId, sizeInList, size);
     }
-    streamMetaWUnLock(pMeta);
-
-    int32_t numOfTmr = pTask->status.timerActive;
-    if (numOfTmr != 0) {
-      stError("s-task:%s vgId:%d invalid timer Active record:%d, internal error", pTask->id.idStr, vgId, numOfTmr);
-    }

     if (pTask->info.delaySchedParam != 0 && pTask->info.fillHistory == 0) {
-      stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt);
+      stDebug("s-task:%s stop schedTimer", pTask->id.idStr);
       streamTmrStop(pTask->schedInfo.pDelayTimer);
       pTask->info.delaySchedParam = 0;
-      streamMetaReleaseTask(pMeta, pTask);
     }

+    int64_t refId = pTask->id.refId;
+    int32_t ret = taosRemoveRef(streamTaskRefPool, refId);
+    if (ret != 0) {
+      stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id.taskId, refId);
+    }
+
     streamMetaReleaseTask(pMeta, pTask);
+    streamMetaWUnLock(pMeta);
   } else {
     stDebug("vgId:%d failed to find the task:0x%x, it may have been dropped already", vgId, taskId);
     streamMetaWUnLock(pMeta);
@@ -1012,13 +1056,13 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) {
     return;
   }

+  vgId = pMeta->vgId;
   pRecycleList = taosArrayInit(4, sizeof(STaskId));
   if (pRecycleList == NULL) {
     stError("vgId:%d failed prepare load all tasks, code:out of memory", vgId);
     return;
   }

-  vgId = pMeta->vgId;
   stInfo("vgId:%d load stream tasks from meta files", vgId);

   code = tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL);
@@ -1062,9 +1106,9 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) {

     if (pTask->status.taskStatus == TASK_STATUS__DROPPING) {
       int32_t taskId = pTask->id.taskId;
-      tFreeStreamTask(pTask);

       STaskId id = streamTaskGetTaskId(pTask);

+      tFreeStreamTask(pTask);
       void* px = taosArrayPush(pRecycleList, &id);
       if (px == NULL) {
         stError("s-task:0x%x failed record the task into recycle list due to out of memory", taskId);
@@ -1100,13 +1144,25 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) {
       continue;
     }

-    if (taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask, POINTER_BYTES) != 0) {
-      stError("s-task:0x%x failed to put into hashTable, code:%s, continue", pTask->id.taskId, tstrerror(terrno));
-      void* px = taosArrayPop(pMeta->pTaskList);
-      tFreeStreamTask(pTask);
+    pTask->id.refId = taosAddRef(streamTaskRefPool, pTask);
+    if (taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask->id.refId, sizeof(int64_t)) != 0) {
+      int64_t refId = pTask->id.refId;
+      stError("s-task:0x%x failed to put into hashTable, code:%s, remove task ref, refId:%" PRId64 " continue",
+              pTask->id.taskId, tstrerror(terrno), refId);
+
+      void*   px = taosArrayPop(pMeta->pTaskList);
+      int32_t ret = taosRemoveRef(streamTaskRefPool, refId);
+      if (ret != 0) {
+        stError("s-task:0x%x failed to remove ref, refId:%" PRId64, (int32_t)id.taskId, refId);
+      }
       continue;
     }

+    // enable the scheduler for stream tasks after acquire the task RefId.
+    streamSetupScheduleTrigger(pTask);
+
+    stInfo("s-task:0x%x vgId:%d set refId:%"PRId64, (int32_t) id.taskId, vgId, pTask->id.refId);
     if (pTask->info.fillHistory == 0) {
       int32_t val = atomic_add_fetch_32(&pMeta->numOfStreamTasks, 1);
     }
@@ -1142,72 +1198,22 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) {
   }
 }

-bool streamMetaTaskInTimer(SStreamMeta* pMeta) {
-  bool inTimer = false;
-  streamMetaRLock(pMeta);
-
-  void* pIter = NULL;
-  while (1) {
-    pIter = taosHashIterate(pMeta->pTasksMap, pIter);
-    if (pIter == NULL) {
-      break;
-    }
-
-    SStreamTask* pTask = *(SStreamTask**)pIter;
-    if (pTask->status.timerActive >= 1) {
-      stDebug("s-task:%s in timer, blocking tasks in vgId:%d restart, set closing again", pTask->id.idStr, pMeta->vgId);
-      int32_t code = streamTaskStop(pTask);
-      if (code) {
-        stError("s-task:%s failed to stop task, code:%s", pTask->id.idStr, tstrerror(code));
-      }
-      inTimer = true;
-    }
-  }
-
-  streamMetaRUnLock(pMeta);
-  return inTimer;
-}
-
 void streamMetaNotifyClose(SStreamMeta* pMeta) {
   int32_t vgId = pMeta->vgId;
   int64_t startTs = 0;
   int32_t sendCount = 0;
-  streamMetaGetHbSendInfo(pMeta->pHbInfo, &startTs, &sendCount);

+  streamMetaGetHbSendInfo(pMeta->pHbInfo, &startTs, &sendCount);
   stInfo("vgId:%d notify all stream tasks that current vnode is closing. isLeader:%d startHb:%" PRId64 ", totalHb:%d",
          vgId, (pMeta->role == NODE_ROLE_LEADER), startTs, sendCount);

   // wait for the stream meta hb function stopping
   streamMetaWaitForHbTmrQuit(pMeta);

-  streamMetaWLock(pMeta);
-
   pMeta->closeFlag = true;
-  void* pIter = NULL;
-  while (1) {
-    pIter = taosHashIterate(pMeta->pTasksMap, pIter);
-    if (pIter == NULL) {
-      break;
-    }
-
-    SStreamTask* pTask = *(SStreamTask**)pIter;
-    stDebug("vgId:%d s-task:%s set task closing flag", vgId, pTask->id.idStr);
-    int32_t code = streamTaskStop(pTask);
-    if (code) {
-      stError("vgId:%d failed to stop task:0x%x, code:%s", vgId, pTask->id.taskId, tstrerror(code));
-    }
-  }
-
-  streamMetaWUnLock(pMeta);

   stDebug("vgId:%d start to check all tasks for closing", vgId);
   int64_t st = taosGetTimestampMs();

-  while (streamMetaTaskInTimer(pMeta)) {
-    stDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
-    taosMsleep(100);
-  }
-
   streamMetaRLock(pMeta);

   SArray* pTaskList = NULL;
@@ -1215,14 +1221,34 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) {
   if (code != TSDB_CODE_SUCCESS) {
   }

-  streamMetaRUnLock(pMeta);
-
-  if (pTaskList != NULL) {
-    taosArrayDestroy(pTaskList);
+  int32_t numOfTasks = taosArrayGetSize(pTaskList);
+  for (int32_t i = 0; i < numOfTasks; ++i) {
+    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
+    SStreamTask*   pTask = NULL;
+
+    code = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask);
+    if (code != TSDB_CODE_SUCCESS) {
+      continue;
+    }
+
+    int64_t refId = pTask->id.refId;
+    int32_t ret = streamTaskStop(pTask);
+    if (ret) {
+      stError("s-task:0x%x failed to stop task, code:%s", pTaskId->taskId, tstrerror(ret));
+    }
+
+    streamMetaReleaseTask(pMeta, pTask);
+    ret = taosRemoveRef(streamTaskRefPool, refId);
+    if (ret) {
+      stError("vgId:%d failed to remove task:0x%x, refId:%" PRId64, pMeta->vgId, pTaskId->taskId, refId);
+    }
   }

-  int64_t el = taosGetTimestampMs() - st;
-  stDebug("vgId:%d all stream tasks are not in timer, continue close, elapsed time:%" PRId64 " ms", pMeta->vgId, el);
+  taosArrayDestroy(pTaskList);
+
+  double el = (taosGetTimestampMs() - st) / 1000.0;
+  stDebug("vgId:%d stop all %d task(s) completed, elapsed time:%.2f Sec.", pMeta->vgId, numOfTasks, el);
+  streamMetaRUnLock(pMeta);
 }

 void streamMetaStartHb(SStreamMeta* pMeta) {
@@ -1232,12 +1258,12 @@ void streamMetaStartHb(SStreamMeta* pMeta) {
     return;
   }

+  *pRid = pMeta->rid;
   int32_t code = metaRefMgtAdd(pMeta->vgId, pRid);
   if (code) {
     return;
   }

-  *pRid = pMeta->rid;
   streamMetaHbToMnode(pRid, NULL);
 }

@@ -1312,13 +1338,15 @@ bool streamMetaAllTasksReady(const SStreamMeta* pMeta) {
   for (int32_t i = 0; i < num; ++i) {
     SStreamTaskId* pId = taosArrayGet(pMeta->pTaskList, i);
     STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId};
-    SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-    if (ppTask == NULL) {
-      continue;
-    }
+    SStreamTask* pTask = NULL;
+    int32_t      code = streamMetaAcquireTaskUnsafe((SStreamMeta*)pMeta, &id, &pTask);

-    if ((*ppTask)->status.downstreamReady == 0) {
-      return false;
+    if (code == 0) {
+      if (pTask->status.downstreamReady == 0) {
+        streamMetaReleaseTask((SStreamMeta*)pMeta, pTask);
+        return false;
+      }
+      streamMetaReleaseTask((SStreamMeta*)pMeta, pTask);
     }
   }

@@ -1335,10 +1363,13 @@ int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta) {

   for (int32_t i = 0; i < numOfTasks; ++i) {
     SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
+    STaskId        id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId};

-    STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId};
-    SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-    streamTaskResetStatus(*pTask);
+    SStreamTask* pTask = NULL;
+    int32_t      code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
+    if (code == 0) {
+      streamTaskResetStatus(pTask);
+      streamMetaReleaseTask(pMeta, pTask);
+    }
   }

   return 0;
@@ -1347,7 +1378,7 @@ int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta) {
 void streamMetaAddIntoUpdateTaskList(SStreamMeta* pMeta, SStreamTask* pTask, SStreamTask* pHTask, int32_t transId,
                                      int64_t startTs) {
   const char* id = pTask->id.idStr;
-  int32_t     vgId = pTask->pMeta->vgId;
+  int32_t     vgId = pMeta->vgId;
   int32_t     code = 0;

   // keep the already updated info
@@ -20,15 +20,18 @@ static void streamTaskResumeHelper(void* param, void* tmrId);
 static void streamTaskSchedHelper(void* param, void* tmrId);

 void streamSetupScheduleTrigger(SStreamTask* pTask) {
-  int64_t delaySchema = pTask->info.delaySchedParam;
-  if (delaySchema != 0 && pTask->info.fillHistory == 0) {
-    int32_t ref = streamMetaAcquireOneTask(pTask);
-    stDebug("s-task:%s setup scheduler trigger, ref:%d delay:%" PRId64 " ms", pTask->id.idStr, ref,
-            pTask->info.delaySchedParam);
+  int64_t delayParam = pTask->info.delaySchedParam;
+  if (delayParam != 0 && pTask->info.fillHistory == 0) {
+    int64_t* pTaskRefId = NULL;
+    int32_t  code = streamTaskAllocRefId(pTask, &pTaskRefId);
+    if (code == 0) {
+      stDebug("s-task:%s refId:%" PRId64 " enable the scheduler trigger, delay:%" PRId64, pTask->id.idStr,
+              pTask->id.refId, delayParam);

-    streamTmrStart(streamTaskSchedHelper, (int32_t)delaySchema, pTask, streamTimer, &pTask->schedInfo.pDelayTimer,
-                   pTask->pMeta->vgId, "sched-tmr");
+      streamTmrStart(streamTaskSchedHelper, (int32_t)delayParam, pTaskRefId, streamTimer,
+                     &pTask->schedInfo.pDelayTimer, pTask->pMeta->vgId, "sched-tmr");
       pTask->schedInfo.status = TASK_TRIGGER_STATUS__INACTIVE;
+    }
   }
 }

@@ -75,49 +78,67 @@ void streamTaskClearSchedIdleInfo(SStreamTask* pTask) { pTask->status.schedIdleT
 void streamTaskSetIdleInfo(SStreamTask* pTask, int32_t idleTime) { pTask->status.schedIdleTime = idleTime; }

 void streamTaskResumeInFuture(SStreamTask* pTask) {
-  int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
-  stDebug("s-task:%s task should idle, add into timer to retry in %dms, ref:%d", pTask->id.idStr,
-          pTask->status.schedIdleTime, ref);
+  stDebug("s-task:%s task should idle, add into timer to retry in %dms", pTask->id.idStr,
+          pTask->status.schedIdleTime);

   // add one ref count for task
-  int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
-  streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer,
-                 pTask->pMeta->vgId, "resume-task-tmr");
+  int64_t* pTaskRefId = NULL;
+  int32_t  code = streamTaskAllocRefId(pTask, &pTaskRefId);
+  if (code == 0) {
+    streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTaskRefId, streamTimer,
+                   &pTask->schedInfo.pIdleTimer, pTask->pMeta->vgId, "resume-task-tmr");
+  }
 }

 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 void streamTaskResumeHelper(void* param, void* tmrId) {
-  SStreamTask* pTask = (SStreamTask*)param;
+  int32_t      code = 0;
+  int64_t      taskRefId = *(int64_t*)param;
+  SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId);
+  if (pTask == NULL) {
+    stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__);
+    streamTaskFreeRefId(param);
+    return;
+  }
+
   SStreamTaskId*   pId = &pTask->id;
   SStreamTaskState p = streamTaskGetStatus(pTask);
-  int32_t          code = 0;

   if (p.state == TASK_STATUS__DROPPING || p.state == TASK_STATUS__STOP) {
     int8_t status = streamTaskSetSchedStatusInactive(pTask);
     TAOS_UNUSED(status);

-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s status:%s not resume task, ref:%d", pId->idStr, p.name, ref);
+    stDebug("s-task:%s status:%s not resume task", pId->idStr, p.name);

     streamMetaReleaseTask(pTask->pMeta, pTask);
+    streamTaskFreeRefId(param);
     return;
   }

   code = streamTaskSchedTask(pTask->pMsgCb, pTask->info.nodeId, pId->streamId, pId->taskId, STREAM_EXEC_T_RESUME_TASK);
-  int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
   if (code) {
-    stError("s-task:%s sched task failed, code:%s, ref:%d", pId->idStr, tstrerror(code), ref);
+    stError("s-task:%s sched task failed, code:%s", pId->idStr, tstrerror(code));
   } else {
-    stDebug("trigger to resume s-task:%s after idled for %dms, ref:%d", pId->idStr, pTask->status.schedIdleTime, ref);
+    stDebug("trigger to resume s-task:%s after idled for %dms", pId->idStr, pTask->status.schedIdleTime);

     // release the task ref count
     streamTaskClearSchedIdleInfo(pTask);
-    streamMetaReleaseTask(pTask->pMeta, pTask);
   }

+  streamMetaReleaseTask(pTask->pMeta, pTask);
+  streamTaskFreeRefId(param);
 }

 void streamTaskSchedHelper(void* param, void* tmrId) {
-  SStreamTask* pTask = (void*)param;
+  int64_t      taskRefId = *(int64_t*)param;
+  SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId);
+  if (pTask == NULL) {
+    stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__);
+    streamTaskFreeRefId(param);
+    return;
+  }
+
+  stDebug("s-task:%s acquire task, refId:%"PRId64, pTask->id.idStr, pTask->id.refId);
+
   const char* id = pTask->id.idStr;
   int32_t     nextTrigger = (int32_t)pTask->info.delaySchedParam;
   int32_t     vgId = pTask->pMeta->vgId;
@@ -125,8 +146,18 @@ void streamTaskSchedHelper(void* param, void* tmrId) {
   int8_t status = atomic_load_8(&pTask->schedInfo.status);
   stTrace("s-task:%s in scheduler, trigger status:%d, next:%dms", id, status, nextTrigger);

-  if (streamTaskShouldStop(pTask) || streamTaskShouldPause(pTask)) {
+  if (streamTaskShouldStop(pTask)) {
     stDebug("s-task:%s should stop, jump out of schedTimer", id);
+    streamMetaReleaseTask(pTask->pMeta, pTask);
+    streamTaskFreeRefId(param);
+    return;
+  }
+
+  if (streamTaskShouldPause(pTask)) {
+    stDebug("s-task:%s is paused, recheck in %.2fs", id, nextTrigger/1000.0);
+    streamTmrStart(streamTaskSchedHelper, nextTrigger, param, streamTimer, &pTask->schedInfo.pDelayTimer, vgId,
+                   "sched-run-tmr");
+    streamMetaReleaseTask(pTask->pMeta, pTask);
     return;
   }

@@ -171,6 +202,7 @@ void streamTaskSchedHelper(void* param, void* tmrId) {
   }

 _end:
-  streamTmrStart(streamTaskSchedHelper, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer, vgId,
+  streamTmrStart(streamTaskSchedHelper, nextTrigger, param, streamTimer, &pTask->schedInfo.pDelayTimer, vgId,
                  "sched-run-tmr");
+  streamMetaReleaseTask(pTask->pMeta, pTask);
 }
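Note: in the scheduling helpers above, the heap refId param has exactly one owner at a time: the callback either re-arms the timer with the same param (pause path, `_end` path) or releases the task and frees the param (stop path, acquire failure), never both. A standalone model of that either/or lifecycle (the recursion stands in for a `streamTmrStart`-style re-arm; all names are illustrative):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* model of the sched-helper param lifecycle: the heap refId param is
 * either handed to the next timer round or freed, never both */
typedef struct { int stopped; } Task;

static Task g_task = {0};

static Task *acquire(int64_t refId) { (void)refId; return g_task.stopped ? NULL : &g_task; }
static void  release(Task *t) { (void)t; }

static void sched_helper(void *param) {
  Task *t = acquire(*(int64_t *)param);
  if (t == NULL) {
    free(param);        /* timer chain ends here: free the refId param */
    return;
  }
  printf("run once, then re-arm with the same param\n");
  release(t);
  g_task.stopped = 1;   /* let the next round terminate the chain */
  sched_helper(param);  /* stand-in for the timer firing again */
}

int main(void) {
  int64_t *p = malloc(sizeof(*p));
  if (p == NULL) return 1;
  *p = 1;
  sched_helper(p);
  return 0;
}
```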
@ -15,6 +15,7 @@
|
||||||
|
|
||||||
#include "streamInt.h"
|
#include "streamInt.h"
|
||||||
#include "streamsm.h"
|
#include "streamsm.h"
|
||||||
|
#include "tref.h"
|
||||||
#include "trpc.h"
|
#include "trpc.h"
|
||||||
#include "ttimer.h"
|
#include "ttimer.h"
|
||||||
#include "wal.h"
|
#include "wal.h"
|
||||||
|
@ -24,7 +25,7 @@
|
||||||
#define SCANHISTORY_IDLE_TICK ((SCANHISTORY_MAX_IDLE_TIME * 1000) / SCANHISTORY_IDLE_TIME_SLICE)
|
#define SCANHISTORY_IDLE_TICK ((SCANHISTORY_MAX_IDLE_TIME * 1000) / SCANHISTORY_IDLE_TIME_SLICE)
|
||||||
|
|
||||||
typedef struct SLaunchHTaskInfo {
|
typedef struct SLaunchHTaskInfo {
|
||||||
SStreamMeta* pMeta;
|
int64_t metaRid;
|
||||||
STaskId id;
|
STaskId id;
|
||||||
STaskId hTaskId;
|
STaskId hTaskId;
|
||||||
} SLaunchHTaskInfo;
|
} SLaunchHTaskInfo;
|
||||||
|
@@ -87,21 +88,15 @@ void streamExecScanHistoryInFuture(SStreamTask* pTask, int32_t idleDuration) {
     numOfTicks = SCANHISTORY_IDLE_TICK;
   }

-  // add ref for task
-  SStreamTask* p = NULL;
-  int32_t code = streamMetaAcquireTask(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, &p);
-  if (p == NULL || code != 0) {
-    stError("s-task:0x%x failed to acquire task, status:%s, not exec scan-history data", pTask->id.taskId,
-            streamTaskGetStatus(pTask).name);
-    return;
-  }
-
   pTask->schedHistoryInfo.numOfTicks = numOfTicks;

-  int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
-  stDebug("s-task:%s scan-history resumed in %.2fs, ref:%d", pTask->id.idStr, numOfTicks * 0.1, ref);
-  streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTask, streamTimer,
-                 &pTask->schedHistoryInfo.pTimer, vgId, "history-task");
+  stDebug("s-task:%s scan-history resumed in %.2fs", pTask->id.idStr, numOfTicks * 0.1);
+  int64_t* pTaskRefId = NULL;
+  int32_t  ret = streamTaskAllocRefId(pTask, &pTaskRefId);
+  if (ret == 0) {
+    streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTaskRefId, streamTimer,
+                   &pTask->schedHistoryInfo.pTimer, vgId, "history-task");
+  }
 }

 int32_t streamTaskStartScanHistory(SStreamTask* pTask) {
@@ -220,42 +215,32 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) {

   // Set the execution conditions, including the query time window and the version range
   streamMetaRLock(pMeta);
-  SStreamTask** pHTask = taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
+  SStreamTask* pHisTask = NULL;
+  code = streamMetaAcquireTaskUnsafe(pMeta, &pTask->hTaskInfo.id, &pHisTask);
   streamMetaRUnLock(pMeta);

-  if (pHTask != NULL) {  // it is already added into stream meta store.
-    SStreamTask* pHisTask = NULL;
-    code = streamMetaAcquireTask(pMeta, hStreamId, hTaskId, &pHisTask);
-    if (pHisTask == NULL) {
-      stDebug("s-task:%s failed acquire and start fill-history task, it may have been dropped/stopped", idStr);
-      code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, false);
+  if (code == 0) {  // it is already added into stream meta store.
+    if (pHisTask->status.downstreamReady == 1) {  // it's ready now, do nothing
+      stDebug("s-task:%s fill-history task is ready, no need to check downstream", pHisTask->id.idStr);
+      code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, true);
       if (code) {
         stError("s-task:%s failed to record start task status, code:%s", idStr, tstrerror(code));
       }
-    } else {
-      if (pHisTask->status.downstreamReady == 1) {  // it's ready now, do nothing
-        stDebug("s-task:%s fill-history task is ready, no need to check downstream", pHisTask->id.idStr);
-        code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, true);
-        if (code) {
-          stError("s-task:%s failed to record start task status, code:%s", idStr, tstrerror(code));
-        }
-      } else {  // exist, but not ready, continue check downstream task status
-        if (pHisTask->pBackend == NULL) {
-          code = pMeta->expandTaskFn(pHisTask);
-          if (code != TSDB_CODE_SUCCESS) {
-            streamMetaAddFailedTaskSelf(pHisTask, now);
-            stError("s-task:%s failed to expand fill-history task, code:%s", pHisTask->id.idStr, tstrerror(code));
-          }
-        }
-
-        if (code == TSDB_CODE_SUCCESS) {
-          checkFillhistoryTaskStatus(pTask, pHisTask);
-        }
+    } else {  // exist, but not ready, continue check downstream task status
+      if (pHisTask->pBackend == NULL) {
+        code = pMeta->expandTaskFn(pHisTask);
+        if (code != TSDB_CODE_SUCCESS) {
+          streamMetaAddFailedTaskSelf(pHisTask, now);
+          stError("s-task:%s failed to expand fill-history task, code:%s", pHisTask->id.idStr, tstrerror(code));
+        }
+      }
+
+      if (code == TSDB_CODE_SUCCESS) {
+        checkFillhistoryTaskStatus(pTask, pHisTask);
       }
     }

     streamMetaReleaseTask(pMeta, pHisTask);
     return code;
   } else {
     return launchNotBuiltFillHistoryTask(pTask);
@@ -296,14 +281,14 @@ void notRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, int64_t now) {
   SStreamMeta*      pMeta = pTask->pMeta;
   SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo;

-  int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+  // int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
   int32_t code = streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false);

   if (code) {
     stError("s-task:%s failed to record the start task status, code:%s", pTask->id.idStr, tstrerror(code));
   } else {
-    stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x, ref:%d",
-            pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId, ref);
+    stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x",
+            pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId);
   }

   pHTaskInfo->id.taskId = 0;

@@ -315,9 +300,9 @@ void doRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, int64_t now) {
   SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo;

   if (streamTaskShouldStop(pTask)) {  // record the failure
-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:0x%" PRIx64 " stopped, not launch rel history task:0x%" PRIx64 ", ref:%d", pInfo->id.taskId,
-            pInfo->hTaskId.taskId, ref);
+    // int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+    stDebug("s-task:0x%" PRIx64 " stopped, not launch rel history task:0x%" PRIx64, pInfo->id.taskId,
+            pInfo->hTaskId.taskId);

     int32_t code = streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false);
     if (code) {
@@ -336,30 +321,60 @@ void doRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, int64_t now) {
   }
 }

+static void doCleanup(SStreamTask* pTask, int64_t metaRid, SLaunchHTaskInfo* pInfo) {
+  SStreamMeta* pMeta = pTask->pMeta;
+  int32_t      vgId = pMeta->vgId;
+
+  streamMetaReleaseTask(pMeta, pTask);
+  int32_t ret = taosReleaseRef(streamMetaRefPool, metaRid);
+  if (ret) {
+    stError("vgId:%d failed to release meta refId:%"PRId64, vgId, metaRid);
+  }
+
+  if (pInfo != NULL) {
+    taosMemoryFree(pInfo);
+  }
+}
+
 void tryLaunchHistoryTask(void* param, void* tmrId) {
   SLaunchHTaskInfo* pInfo = param;
-  SStreamMeta*      pMeta = pInfo->pMeta;
+  int64_t           metaRid = pInfo->metaRid;
   int64_t           now = taosGetTimestampMs();
   int32_t           code = 0;
+  SStreamTask*      pTask = NULL;
+  int32_t           vgId = 0;
+
+  SStreamMeta* pMeta = taosAcquireRef(streamMetaRefPool, metaRid);
+  if (pMeta == NULL) {
+    stError("invalid meta rid:%" PRId64 " failed to acquired stream-meta", metaRid);
+    taosMemoryFree(pInfo);
+    return;
+  }
+
+  vgId = pMeta->vgId;
+
   streamMetaWLock(pMeta);

-  SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pInfo->id, sizeof(pInfo->id));
-  if (ppTask == NULL || *ppTask == NULL) {
+  code = streamMetaAcquireTaskUnsafe(pMeta, &pInfo->id, &pTask);
+  if (code != 0) {
     stError("s-task:0x%x and rel fill-history task:0x%" PRIx64 " all have been destroyed, not launch",
             (int32_t)pInfo->id.taskId, pInfo->hTaskId.taskId);
     streamMetaWUnLock(pMeta);

+    int32_t ret = taosReleaseRef(streamMetaRefPool, metaRid);
+    if (ret) {
+      stError("vgId:%d failed to release meta refId:%"PRId64, vgId, metaRid);
+    }
+
     // already dropped, no need to set the failure info into the stream task meta.
     taosMemoryFree(pInfo);
     return;
   }

-  if (streamTaskShouldStop(*ppTask)) {
-    char*   p = streamTaskGetStatus(*ppTask).name;
-    int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1);
-    stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d, ref:%d",
-            (*ppTask)->id.idStr, p, (*ppTask)->hTaskInfo.retryTimes, ref);
+  if (streamTaskShouldStop(pTask)) {
+    char* p = streamTaskGetStatus(pTask).name;
+    stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d", pTask->id.idStr, p,
+            pTask->hTaskInfo.retryTimes);

     streamMetaWUnLock(pMeta);
@@ -369,77 +384,54 @@ void tryLaunchHistoryTask(void* param, void* tmrId) {
     stError("s-task:0x%" PRId64 " failed to record the start task status, code:%s", pInfo->hTaskId.taskId,
             tstrerror(code));
     }
-    taosMemoryFree(pInfo);
+    doCleanup(pTask, metaRid, pInfo);
     return;
   }

-  SStreamTask* pTask = NULL;
-  code = streamMetaAcquireTaskNoLock(pMeta, pInfo->id.streamId, pInfo->id.taskId, &pTask);
-  if (code != TSDB_CODE_SUCCESS) {
-    // todo
-  }
   streamMetaWUnLock(pMeta);

-  if (pTask != NULL) {
-    SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo;
-    pHTaskInfo->tickCount -= 1;
-    if (pHTaskInfo->tickCount > 0) {
-      streamTmrStart(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamTimer, &pHTaskInfo->pTimer,
-                     pTask->pMeta->vgId, " start-history-task-tmr");
-      streamMetaReleaseTask(pMeta, pTask);
-      return;
-    }
-
-    if (pHTaskInfo->retryTimes > MAX_RETRY_LAUNCH_HISTORY_TASK) {
-      notRetryLaunchFillHistoryTask(pTask, pInfo, now);
-    } else {  // not reach the limitation yet, let's continue retrying launch related fill-history task.
-      streamTaskSetRetryInfoForLaunch(pHTaskInfo);
-      if (pTask->status.timerActive < 1) {
-        stError("s-task:%s invalid timerActive recorder:%d, abort timer", pTask->id.idStr, pTask->status.timerActive);
-        return;
-      }
-
-      // abort the timer if intend to stop task
-      SStreamTask* pHTask = NULL;
-      code = streamMetaAcquireTask(pMeta, pHTaskInfo->id.streamId, pHTaskInfo->id.taskId, &pHTask);
-      if (pHTask == NULL) {
-        doRetryLaunchFillHistoryTask(pTask, pInfo, now);
-        streamMetaReleaseTask(pMeta, pTask);
-        return;
-      } else {
-        if (pHTask->pBackend == NULL) {
-          code = pMeta->expandTaskFn(pHTask);
-          if (code != TSDB_CODE_SUCCESS) {
-            streamMetaAddFailedTaskSelf(pHTask, now);
-            stError("failed to expand fill-history task:%s, code:%s", pHTask->id.idStr, tstrerror(code));
-          }
-        }
-
-        if (code == TSDB_CODE_SUCCESS) {
-          checkFillhistoryTaskStatus(pTask, pHTask);
-          // not in timer anymore
-          int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-          stDebug("s-task:0x%x fill-history task launch completed, retry times:%d, ref:%d", (int32_t)pInfo->id.taskId,
-                  pHTaskInfo->retryTimes, ref);
-        }
-        streamMetaReleaseTask(pMeta, pHTask);
-      }
-    }
-
-    streamMetaReleaseTask(pMeta, pTask);
-  } else {
-    code = streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false);
-    if (code) {
-      stError("s-task:%s failed to record the start task status, code:%s", pTask->id.idStr, tstrerror(code));
-    }
-
-    int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1);
-    stError("s-task:0x%x rel fill-history task:0x%" PRIx64 " may have been destroyed, not launch, ref:%d",
-            (int32_t)pInfo->id.taskId, pInfo->hTaskId.taskId, ref);
-  }
-
-  taosMemoryFree(pInfo);
+  SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo;
+  pHTaskInfo->tickCount -= 1;
+  if (pHTaskInfo->tickCount > 0) {
+    streamTmrStart(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamTimer, &pHTaskInfo->pTimer,
+                   pTask->pMeta->vgId, " start-history-task-tmr");
+    doCleanup(pTask, metaRid, NULL);
+    return;
+  }
+
+  if (pHTaskInfo->retryTimes > MAX_RETRY_LAUNCH_HISTORY_TASK) {
+    notRetryLaunchFillHistoryTask(pTask, pInfo, now);
+  } else {  // not reach the limitation yet, let's continue retrying launch related fill-history task.
+    streamTaskSetRetryInfoForLaunch(pHTaskInfo);
+
+    // abort the timer if intend to stop task
+    SStreamTask* pHTask = NULL;
+    code = streamMetaAcquireTask(pMeta, pHTaskInfo->id.streamId, pHTaskInfo->id.taskId, &pHTask);
+    if (pHTask == NULL) {
+      doRetryLaunchFillHistoryTask(pTask, pInfo, now);
+      doCleanup(pTask, metaRid, NULL);
+      return;
+    } else {
+      if (pHTask->pBackend == NULL) {
+        code = pMeta->expandTaskFn(pHTask);
+        if (code != TSDB_CODE_SUCCESS) {
+          streamMetaAddFailedTaskSelf(pHTask, now);
+          stError("failed to expand fill-history task:%s, code:%s", pHTask->id.idStr, tstrerror(code));
+        }
+      }
+
+      if (code == TSDB_CODE_SUCCESS) {
+        checkFillhistoryTaskStatus(pTask, pHTask);
+        // not in timer anymore
+        stDebug("s-task:0x%x fill-history task launch completed, retry times:%d", (int32_t)pInfo->id.taskId,
+                pHTaskInfo->retryTimes);
+      }
+      streamMetaReleaseTask(pMeta, pHTask);
+    }
+  }
+
+  doCleanup(pTask, metaRid, pInfo);
 }
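The tryLaunchHistoryTask rework applies the same idea one level up: SLaunchHTaskInfo now carries the meta's rid instead of a raw SStreamMeta pointer, and every exit path funnels through the new doCleanup() helper. A small sketch of the exit contract, under the assumption that each callback invocation pairs exactly one taosAcquireRef with one taosReleaseRef (the wrapper function here is hypothetical):

```c
// Sketch of the exit-path contract doCleanup() enforces in tryLaunchHistoryTask.
static void launchTimerExitShape(SStreamTask* pTask, int64_t metaRid, SLaunchHTaskInfo* pInfo, bool rearmed) {
  if (rearmed) {
    doCleanup(pTask, metaRid, NULL);   // timer re-armed with pInfo: keep it alive
  } else {
    doCleanup(pTask, metaRid, pInfo);  // terminal exit: also free the launch info
  }
}
```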
@@ -455,7 +447,7 @@ int32_t createHTaskLaunchInfo(SStreamMeta* pMeta, STaskId* pTaskId, int64_t hStreamId, int32_t hTaskId,
   (*pInfo)->hTaskId.streamId = hStreamId;
   (*pInfo)->hTaskId.taskId = hTaskId;

-  (*pInfo)->pMeta = pMeta;
+  (*pInfo)->metaRid = pMeta->rid;
   return TSDB_CODE_SUCCESS;
 }

@@ -485,12 +477,10 @@ int32_t launchNotBuiltFillHistoryTask(SStreamTask* pTask) {

   // check for the timer
   if (pTask->hTaskInfo.pTimer == NULL) {
-    int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
     pTask->hTaskInfo.pTimer = taosTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer);

     if (pTask->hTaskInfo.pTimer == NULL) {
-      ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-      stError("s-task:%s failed to start timer, related fill-history task not launched, ref:%d", idStr, ref);
+      stError("s-task:%s failed to start timer, related fill-history task not launched", idStr);

       taosMemoryFree(pInfo);
       code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, false);

@@ -500,18 +490,8 @@ int32_t launchNotBuiltFillHistoryTask(SStreamTask* pTask) {
       return terrno;
     }

-    if (ref < 1) {
-      stError("s-task:%s invalid timerActive recorder:%d, abort timer", pTask->id.idStr, pTask->status.timerActive);
-      return TSDB_CODE_STREAM_INTERNAL_ERROR;
-    }
-
-    stDebug("s-task:%s set timer active flag, ref:%d", idStr, ref);
+    stDebug("s-task:%s set timer active flag", idStr);
   } else {  // timer exists
-    if (pTask->status.timerActive < 1) {
-      stError("s-task:%s invalid timerActive recorder:%d, abort timer", pTask->id.idStr, pTask->status.timerActive);
-      return TSDB_CODE_STREAM_INTERNAL_ERROR;
-    }
-
     stDebug("s-task:%s set timer active flag, task timer not null", idStr);
     streamTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer, &pTask->hTaskInfo.pTimer,
                    pTask->pMeta->vgId, " start-history-task-tmr");
@@ -590,15 +570,22 @@ int32_t streamTaskSetRangeStreamCalc(SStreamTask* pTask) {
 }

 void doExecScanhistoryInFuture(void* param, void* tmrId) {
-  SStreamTask* pTask = param;
+  int64_t taskRefId = *(int64_t*) param;
+
+  SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId);
+  if (pTask == NULL) {
+    stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__);
+    streamTaskFreeRefId(param);
+    return;
+  }
+
   pTask->schedHistoryInfo.numOfTicks -= 1;

   SStreamTaskState p = streamTaskGetStatus(pTask);
   if (p.state == TASK_STATUS__DROPPING || p.state == TASK_STATUS__STOP) {
-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s status:%s not start scan-history again, ref:%d", pTask->id.idStr, p.name, ref);
+    stDebug("s-task:%s status:%s not start scan-history again", pTask->id.idStr, p.name);

     streamMetaReleaseTask(pTask->pMeta, pTask);
+    streamTaskFreeRefId(param);
     return;
   }

@@ -608,16 +595,19 @@ void doExecScanhistoryInFuture(void* param, void* tmrId) {
       stError("s-task:%s async start history task failed", pTask->id.idStr);
     }

-    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s fill-history:%d start scan-history data, out of tmr, ref:%d", pTask->id.idStr,
-            pTask->info.fillHistory, ref);
-
-    // release the task.
-    streamMetaReleaseTask(pTask->pMeta, pTask);
+    stDebug("s-task:%s fill-history:%d start scan-history data, out of tmr", pTask->id.idStr,
+            pTask->info.fillHistory);
   } else {
-    streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTask, streamTimer,
-                   &pTask->schedHistoryInfo.pTimer, pTask->pMeta->vgId, " start-history-task-tmr");
+    int64_t* pTaskRefId = NULL;
+    int32_t  code = streamTaskAllocRefId(pTask, &pTaskRefId);
+    if (code == 0) {
+      streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTaskRefId, streamTimer,
+                     &pTask->schedHistoryInfo.pTimer, pTask->pMeta->vgId, " start-history-task-tmr");
+    }
   }

+  streamMetaReleaseTask(pTask->pMeta, pTask);
+  streamTaskFreeRefId(param);
 }
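doExecScanhistoryInFuture shows the arm side of the pattern: each timer shot gets a freshly allocated refId, registered with the per-vnode refId bookkeeping, so the pending callback can outlive the task without dangling. A compact sketch of arming (the function name is illustrative; the helpers are the ones this diff defines in streamTask):

```c
// Sketch: arming the scan-history timer with a per-shot heap refId.
static int32_t armHistoryTimerShape(SStreamTask* pTask, int32_t vgId) {
  int64_t* pTaskRefId = NULL;
  int32_t  code = streamTaskAllocRefId(pTask, &pTaskRefId);  // malloc + register id
  if (code == 0) {
    streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTaskRefId, streamTimer,
                   &pTask->schedHistoryInfo.pTimer, vgId, "history-task");
  }
  return code;  // on failure the timer is simply not armed and nothing leaks
}
```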
@@ -196,19 +196,17 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, int64_t endTs, bool ready) {
   STaskId id = {.streamId = streamId, .taskId = taskId};
   int32_t vgId = pMeta->vgId;
   bool    allRsp = true;
+  SStreamTask* p = NULL;

   streamMetaWLock(pMeta);
-  SStreamTask** p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-  if (p == NULL) {  // task does not exists in current vnode, not record the complete info
+  int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &p);
+  if (code != 0) {  // task does not exist in current vnode, not record the complete info
     stError("vgId:%d s-task:0x%x not exists discard the check downstream info", vgId, taskId);
     streamMetaWUnLock(pMeta);
     return 0;
   }

-  // clear the send consensus-checkpointId flag
-  // streamMutexLock(&(*p)->lock);
-  // (*p)->status.sendConsensusChkptId = false;
-  // streamMutexUnlock(&(*p)->lock);
+  streamMetaReleaseTask(pMeta, p);

   if (pStartInfo->startAllTasks != 1) {
     int64_t el = endTs - startTs;

@@ -222,7 +220,7 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, int64_t endTs, bool ready) {

   STaskInitTs initTs = {.start = startTs, .end = endTs, .success = ready};
   SHashObj*   pDst = ready ? pStartInfo->pReadyTaskSet : pStartInfo->pFailedTaskSet;
-  int32_t code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs));
+  code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs));
   if (code) {
     if (code == TSDB_CODE_DUP_KEY) {
       stError("vgId:%d record start task result failed, s-task:0x%" PRIx64

@@ -296,13 +294,14 @@ void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) {
   while ((pIter = taosHashIterate(pTaskSet, pIter)) != NULL) {
     STaskInitTs* pInfo = pIter;
     void*        key = taosHashGetKey(pIter, &keyLen);
-    SStreamTask** pTask1 = taosHashGet(pMeta->pTasksMap, key, sizeof(STaskId));
-    if (pTask1 == NULL) {
-      stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed");
+    SStreamTask* pTask = NULL;
+    int32_t      code = streamMetaAcquireTaskUnsafe(pMeta, key, &pTask);
+    if (code == 0) {
+      stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", pTask->id.idStr,
+             pTask->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? "success" : "failed");
+      streamMetaReleaseTask(pMeta, pTask);
     } else {
-      stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", (*pTask1)->id.idStr,
-             (*pTask1)->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? "success" : "failed");
+      stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed");
     }
   }
 }
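A substitution that recurs across this file: direct taosHashGet lookups into pMeta->pTasksMap, which handed out unprotected SStreamTask** entries, are replaced by streamMetaAcquireTaskUnsafe under the meta lock, always paired with streamMetaReleaseTask. A sketch of the shape (the useTask() call stands in for caller-specific work and is hypothetical):

```c
// Sketch of the acquire/use/release shape used by displayStatusInfo and friends.
static void lookupTaskShape(SStreamMeta* pMeta, STaskId* pId) {
  SStreamTask* pTask = NULL;
  streamMetaWLock(pMeta);
  int32_t code = streamMetaAcquireTaskUnsafe(pMeta, pId, &pTask);  // +1 ref on hit
  streamMetaWUnLock(pMeta);

  if (code == 0) {
    // useTask(pTask);                    // hypothetical caller-specific work
    streamMetaReleaseTask(pMeta, pTask);  // always pair the acquire
  }
}
```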
@@ -356,7 +355,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
   // fill-history task can only be launched by related stream tasks.
   STaskExecStatisInfo* pInfo = &pTask->execInfo;
   if (pTask->info.fillHistory == 1) {
-    stError("s-task:0x%x vgId:%d fill-histroy task, not start here", taskId, vgId);
+    stError("s-task:0x%x vgId:%d fill-history task, not start here", taskId, vgId);
     streamMetaReleaseTask(pMeta, pTask);
     return TSDB_CODE_SUCCESS;
   }

@@ -364,6 +363,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
   // the start all tasks procedure may happen to start the newly deployed stream task, and results in the
   // concurrently start this task by two threads.
   streamMutexLock(&pTask->lock);

   SStreamTaskState status = streamTaskGetStatus(pTask);
   if (status.state != TASK_STATUS__UNINIT) {
     stError("s-task:0x%x vgId:%d status:%s not uninit status, not start stream task", taskId, vgId, status.name);

@@ -380,6 +380,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {

   if(pTask->status.downstreamReady != 0) {
     stFatal("s-task:0x%x downstream should be not ready, but it ready here, internal error happens", taskId);
+    streamMetaReleaseTask(pMeta, pTask);
     return TSDB_CODE_STREAM_INTERNAL_ERROR;
   }

@@ -396,7 +397,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
     streamMutexUnlock(&pTask->lock);
   }

-  // concurrently start task may cause the later started task be failed, and also failed to added into meta result.
+  // concurrently start task may cause the latter started task be failed, and also failed to added into meta result.
   if (code == TSDB_CODE_SUCCESS) {
     code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT);
     if (code != TSDB_CODE_SUCCESS) {
@@ -417,8 +418,10 @@
 int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
   streamMetaRLock(pMeta);

+  SArray* pTaskList = NULL;
   int32_t num = taosArrayGetSize(pMeta->pTaskList);
   stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num);

   if (num == 0) {
     stDebug("vgId:%d stop all %d task(s) completed, elapsed time:0 Sec.", pMeta->vgId, num);
     streamMetaRUnLock(pMeta);

@@ -428,14 +431,12 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
   int64_t st = taosGetTimestampMs();

   // send hb msg to mnode before closing all tasks.
-  SArray* pTaskList = NULL;
   int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList);
   if (code != TSDB_CODE_SUCCESS) {
     return code;
   }

   int32_t numOfTasks = taosArrayGetSize(pTaskList);

   for (int32_t i = 0; i < numOfTasks; ++i) {
     SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
     SStreamTask*   pTask = NULL;

@@ -445,10 +446,12 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
       continue;
     }

+    int64_t refId = pTask->id.refId;
     int32_t ret = streamTaskStop(pTask);
     if (ret) {
       stError("s-task:0x%x failed to stop task, code:%s", pTaskId->taskId, tstrerror(ret));
     }

     streamMetaReleaseTask(pMeta, pTask);
   }
@@ -466,6 +469,7 @@ int32_t streamTaskCheckIfReqConsenChkptId(SStreamTask* pTask, int64_t ts) {

   int32_t vgId = pTask->pMeta->vgId;
   if (pConChkptInfo->status == TASK_CONSEN_CHKPT_REQ) {
+    // mark the sending of req consensus checkpoint request.
     pConChkptInfo->status = TASK_CONSEN_CHKPT_SEND;
     pConChkptInfo->statusTs = ts;
     stDebug("s-task:%s vgId:%d set requiring consensus-chkptId in hbMsg, ts:%" PRId64, pTask->id.idStr,

@@ -473,6 +477,8 @@ int32_t streamTaskCheckIfReqConsenChkptId(SStreamTask* pTask, int64_t ts) {
     return 1;
   } else {
     int32_t el = (ts - pConChkptInfo->statusTs) / 1000;

+    // not recv consensus-checkpoint rsp for 60sec, send it again in hb to mnode
     if ((pConChkptInfo->status == TASK_CONSEN_CHKPT_SEND) && el > 60) {
       pConChkptInfo->statusTs = ts;

@@ -492,7 +498,7 @@ void streamTaskSetConsenChkptIdRecv(SStreamTask* pTask, int32_t transId, int64_t ts) {
   pInfo->status = TASK_CONSEN_CHKPT_RECV;
   pInfo->statusTs = ts;

-  stDebug("s-task:%s set recv consen-checkpointId, transId:%d", pTask->id.idStr, transId);
+  stInfo("s-task:%s set recv consen-checkpointId, transId:%d", pTask->id.idStr, transId);
 }

 void streamTaskSetReqConsenChkptId(SStreamTask* pTask, int64_t ts) {
@@ -507,23 +513,24 @@ void streamTaskSetReqConsenChkptId(SStreamTask* pTask, int64_t ts) {
 }

 int32_t streamMetaAddFailedTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
   int32_t code = TSDB_CODE_SUCCESS;
   int64_t now = taosGetTimestampMs();
   int64_t startTs = 0;
   bool    hasFillhistoryTask = false;
   STaskId hId = {0};
+  STaskId id = {.streamId = streamId, .taskId = taskId};
+  SStreamTask* pTask = NULL;

   stDebug("vgId:%d add start failed task:0x%x", pMeta->vgId, taskId);

   streamMetaRLock(pMeta);

-  STaskId       id = {.streamId = streamId, .taskId = taskId};
-  SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-
-  if (ppTask != NULL) {
-    startTs = (*ppTask)->taskCheckInfo.startTs;
-    hasFillhistoryTask = HAS_RELATED_FILLHISTORY_TASK(*ppTask);
-    hId = (*ppTask)->hTaskInfo.id;
+  code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask);
+  if (code == 0) {
+    startTs = pTask->taskCheckInfo.startTs;
+    hasFillhistoryTask = HAS_RELATED_FILLHISTORY_TASK(pTask);
+    hId = pTask->hTaskInfo.id;
+    streamMetaReleaseTask(pMeta, pTask);

     streamMetaRUnLock(pMeta);
@@ -211,22 +211,23 @@ int32_t tDecodeStreamTaskId(SDecoder* pDecoder, STaskId* pTaskId) {
   return 0;
 }

-void tFreeStreamTask(SStreamTask* pTask) {
+void tFreeStreamTask(void* pParam) {
   char*   p = NULL;
-  int32_t taskId = pTask->id.taskId;
+  SStreamTask* pTask = pParam;
+  int32_t      taskId = pTask->id.taskId;

   STaskExecStatisInfo* pStatis = &pTask->execInfo;

   ETaskStatus status1 = TASK_STATUS__UNINIT;
   streamMutexLock(&pTask->lock);
   if (pTask->status.pSM != NULL) {
-    SStreamTaskState pStatus = streamTaskGetStatus(pTask);
-    p = pStatus.name;
-    status1 = pStatus.state;
+    SStreamTaskState status = streamTaskGetStatus(pTask);
+    p = status.name;
+    status1 = status.state;
   }
   streamMutexUnlock(&pTask->lock);

-  stDebug("start to free s-task:0x%x %p, state:%s", taskId, pTask, p);
+  stDebug("start to free s-task:0x%x %p, state:%s, refId:%" PRId64, taskId, pTask, p, pTask->id.refId);

   SCheckpointInfo* pCkInfo = &pTask->chkInfo;
   stDebug("s-task:0x%x task exec summary: create:%" PRId64 ", init:%" PRId64 ", start:%" PRId64

@@ -235,12 +236,6 @@ void tFreeStreamTask(SStreamTask* pTask) {
           taskId, pStatis->created, pStatis->checkTs, pStatis->readyTs, pStatis->updateCount, pStatis->latestUpdateTs,
           pCkInfo->checkpointId, pCkInfo->checkpointVer, pCkInfo->nextProcessVer, pStatis->checkpoint);

-  // remove the ref by timer
-  while (pTask->status.timerActive > 0) {
-    stDebug("s-task:%s wait for task stop timer activities, ref:%d", pTask->id.idStr, pTask->status.timerActive);
-    taosMsleep(100);
-  }
-
   if (pTask->schedInfo.pDelayTimer != NULL) {
     streamTmrStop(pTask->schedInfo.pDelayTimer);
     pTask->schedInfo.pDelayTimer = NULL;

@@ -428,8 +423,7 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver) {
     return code;
   }

-  pTask->refCnt = 1;
+  pTask->id.refId = 0;

   pTask->inputq.status = TASK_INPUT_STATUS__NORMAL;
   pTask->outputq.status = TASK_OUTPUT_STATUS__NORMAL;

@@ -441,7 +435,6 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver) {
   }

   pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE;
-  pTask->status.timerActive = 0;

   code = streamCreateStateMachine(pTask);
   if (pTask->status.pSM == NULL || code != TSDB_CODE_SUCCESS) {
|
@ -837,28 +830,31 @@ int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask) {
|
||||||
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt) {
|
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt) {
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
SStreamMeta* pMeta = pTask->pMeta;
|
SStreamMeta* pMeta = pTask->pMeta;
|
||||||
STaskId sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId};
|
SStreamTask* pStreamTask = NULL;
|
||||||
|
|
||||||
if (pTask->info.fillHistory == 0) {
|
if (pTask->info.fillHistory == 0) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamTask** ppStreamTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &sTaskId, sizeof(sTaskId));
|
code = streamMetaAcquireTaskUnsafe(pMeta, &pTask->streamTaskId, &pStreamTask);
|
||||||
if (ppStreamTask != NULL) {
|
if (code == 0) {
|
||||||
stDebug("s-task:%s clear the related stream task:0x%x attr to fill-history task", pTask->id.idStr,
|
stDebug("s-task:%s clear the related stream task:0x%x attr to fill-history task", pTask->id.idStr,
|
||||||
(int32_t)sTaskId.taskId);
|
(int32_t)pTask->streamTaskId.taskId);
|
||||||
|
|
||||||
streamMutexLock(&(*ppStreamTask)->lock);
|
streamMutexLock(&(pStreamTask->lock));
|
||||||
CLEAR_RELATED_FILLHISTORY_TASK((*ppStreamTask));
|
CLEAR_RELATED_FILLHISTORY_TASK(pStreamTask);
|
||||||
|
|
||||||
if (resetRelHalt) {
|
if (resetRelHalt) {
|
||||||
stDebug("s-task:0x%" PRIx64 " set the persistent status attr to be ready, prev:%s, status in sm:%s",
|
stDebug("s-task:0x%" PRIx64 " set the persistent status attr to be ready, prev:%s, status in sm:%s",
|
||||||
sTaskId.taskId, streamTaskGetStatusStr((*ppStreamTask)->status.taskStatus),
|
pTask->streamTaskId.taskId, streamTaskGetStatusStr(pStreamTask->status.taskStatus),
|
||||||
streamTaskGetStatus(*ppStreamTask).name);
|
streamTaskGetStatus(pStreamTask).name);
|
||||||
(*ppStreamTask)->status.taskStatus = TASK_STATUS__READY;
|
pStreamTask->status.taskStatus = TASK_STATUS__READY;
|
||||||
}
|
}
|
||||||
|
|
||||||
code = streamMetaSaveTask(pMeta, *ppStreamTask);
|
code = streamMetaSaveTask(pMeta, pStreamTask);
|
||||||
streamMutexUnlock(&(*ppStreamTask)->lock);
|
streamMutexUnlock(&(pStreamTask->lock));
|
||||||
|
|
||||||
|
streamMetaReleaseTask(pMeta, pStreamTask);
|
||||||
}
|
}
|
||||||
|
|
||||||
return code;
|
return code;
|
||||||
|
@ -1282,3 +1278,27 @@ const char* streamTaskGetExecType(int32_t type) {
|
||||||
return "invalid-exec-type";
|
return "invalid-exec-type";
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t streamTaskAllocRefId(SStreamTask* pTask, int64_t** pRefId) {
|
||||||
|
*pRefId = taosMemoryMalloc(sizeof(int64_t));
|
||||||
|
if (*pRefId != NULL) {
|
||||||
|
**pRefId = pTask->id.refId;
|
||||||
|
int32_t code = metaRefMgtAdd(pTask->pMeta->vgId, *pRefId);
|
||||||
|
if (code != 0) {
|
||||||
|
stError("s-task:%s failed to add refId:%" PRId64 " into refId-mgmt, code:%s", pTask->id.idStr, pTask->id.refId,
|
||||||
|
tstrerror(code));
|
||||||
|
}
|
||||||
|
return code;
|
||||||
|
} else {
|
||||||
|
stError("s-task:%s failed to alloc new ref id, code:%s", pTask->id.idStr, tstrerror(terrno));
|
||||||
|
return terrno;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void streamTaskFreeRefId(int64_t* pRefId) {
|
||||||
|
if (pRefId == NULL) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
metaRefMgtRemove(pRefId);
|
||||||
|
}
|
|
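These two helpers close the loop: streamTaskAllocRefId hands the timer a heap-owned copy of the task's refId and records it in the vgId-scoped registry via metaRefMgtAdd, and streamTaskFreeRefId unregisters it again. Ownership of the int64_t transfers to the timer param, so exactly one exit path of the callback must free it. A usage sketch (cb, delayMs, and pTmr are placeholders for the caller's callback, interval, and timer handle):

```c
// Sketch: pairing the helpers around a single timer shot.
static void armOnceShape(SStreamTask* pTask, TAOS_TMR_CALLBACK cb, int32_t delayMs,
                         tmr_h* pTmr, int32_t vgId) {
  int64_t* pRefId = NULL;
  if (streamTaskAllocRefId(pTask, &pRefId) == 0) {  // ownership moves to the timer
    streamTmrStart(cb, delayMs, pRefId, streamTimer, pTmr, vgId, "some-tmr");
  }
  // the callback's final exit path is responsible for streamTaskFreeRefId(param)
}
```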
@@ -66,15 +66,9 @@ void streamTmrStop(tmr_h tmrId) {
   }
 }

-int32_t streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, SStreamTask* pTask) {
+void streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, void* param) {
   pInfo->activeCounter = 0;
   pInfo->launchChkptId = 0;
   atomic_store_8(&pInfo->isActive, 0);
+  streamTaskFreeRefId(param);
-
-  int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-  if (ref < 0) {
-    stFatal("invalid task timer ref value:%d, %s", ref, pTask->id.idStr);
-  }
-
-  return ref;
 }
@@ -158,7 +158,7 @@ int main(int argc, char *argv[]) {
   }

   initLogEnv();
-  taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
+  taosVersionStrToInt(td_version, &(rpcInit.compatibilityVer));
   void *pRpc = rpcOpen(&rpcInit);
   if (pRpc == NULL) {
     tError("failed to initialize RPC");

@@ -127,7 +127,7 @@ int main(int argc, char *argv[]) {
   rpcInit.cfp = processRequestMsg;
   rpcInit.idleTime = 2 * 1500;

-  taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
+  taosVersionStrToInt(td_version, &(rpcInit.compatibilityVer));
   rpcDebugFlag = 131;
   rpcInit.compressSize = -1;

@@ -165,7 +165,7 @@ int main(int argc, char *argv[]) {
   rpcInit.connType = TAOS_CONN_SERVER;

   initLogEnv();
-  taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
+  taosVersionStrToInt(td_version, &(rpcInit.compatibilityVer));
   void *pRpc = rpcOpen(&rpcInit);
   if (pRpc == NULL) {
     tError("failed to start RPC server");

@@ -56,7 +56,7 @@ class Client {
     rpcInit_.connType = TAOS_CONN_CLIENT;
     rpcInit_.shareConnLimit = 200;

-    taosVersionStrToInt(version, &(rpcInit_.compatibilityVer));
+    taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer));
     this->transCli = rpcOpen(&rpcInit_);
     tsem_init(&this->sem, 0, 0);
   }

@@ -69,7 +69,7 @@ class Client {
   void Restart(CB cb) {
     rpcClose(this->transCli);
     rpcInit_.cfp = cb;
-    taosVersionStrToInt(version, &(rpcInit_.compatibilityVer));
+    taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer));
     this->transCli = rpcOpen(&rpcInit_);
   }
   void Stop() {

@@ -129,7 +129,7 @@ class Server {
     rpcInit_.cfp = processReq;
     rpcInit_.user = (char *)user;
     rpcInit_.connType = TAOS_CONN_SERVER;
-    taosVersionStrToInt(version, &(rpcInit_.compatibilityVer));
+    taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer));
   }
   void Start() {
     this->transSrv = rpcOpen(&this->rpcInit_);

@@ -56,7 +56,7 @@ class Client {
     rpcInit_.connType = TAOS_CONN_CLIENT;
     rpcInit_.shareConnLimit = 200;

-    taosVersionStrToInt(version, &(rpcInit_.compatibilityVer));
+    taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer));
     this->transCli = rpcOpen(&rpcInit_);
     //tsem_init(&this->sem, 0, 0);
   }

@@ -69,7 +69,7 @@ class Client {
   void Restart(CB cb) {
     rpcClose(this->transCli);
     rpcInit_.cfp = cb;
-    taosVersionStrToInt(version, &(rpcInit_.compatibilityVer));
+    taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer));
     this->transCli = rpcOpen(&rpcInit_);
   }
   void Stop() {

@@ -139,7 +139,7 @@ class Server {
     rpcInit_.cfp = processReq;
     rpcInit_.user = (char *)user;
     rpcInit_.connType = TAOS_CONN_SERVER;
-    taosVersionStrToInt(version, &(rpcInit_.compatibilityVer));
+    taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer));
   }
   void Start() {
     this->transSrv = rpcOpen(&this->rpcInit_);

@@ -1,7 +1,7 @@
-char version[64] = "${TD_VER_NUMBER}";
-char compatible_version[12] = "${TD_VER_COMPATIBLE}";
-char gitinfo[48] = "${TD_VER_GIT}";
-char gitinfoOfInternal[48] = "${TD_VER_GIT_INTERNAL}";
-char buildinfo[64] = "${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}";
+char td_version[64] = "${TD_VER_NUMBER}";
+char td_compatible_version[12] = "${TD_VER_COMPATIBLE}";
+char td_gitinfo[48] = "${TD_VER_GIT}";
+char td_gitinfoOfInternal[48] = "${TD_VER_GIT_INTERNAL}";
+char td_buildinfo[64] = "${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}";

 void libtaos_${TD_LIB_VER_NUMBER}_${TD_VER_OSTYPE}_${TD_VER_CPUTYPE}_${TD_VER_VERTYPE}() {};
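The group of hunks above is mechanical: the build-info globals gain a td_ prefix (version becomes td_version, and likewise for compatible_version, gitinfo, gitinfoOfInternal, and buildinfo), presumably to avoid collisions with other libraries that export a bare version symbol. Callers change only the symbol name; a minimal sketch:

```c
// Sketch: the only change callers need is the renamed global.
extern char td_version[];

static void initCompatVerShape(SRpcInit* pInit) {
  // identical call as before, with the td_-prefixed symbol
  taosVersionStrToInt(td_version, &pInit->compatibilityVer);
}
```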
@ -0,0 +1,375 @@
|
||||||
|
from frame.log import *
|
||||||
|
from frame.cases import *
|
||||||
|
from frame.sql import *
|
||||||
|
from frame.caseBase import *
|
||||||
|
from frame import *
|
||||||
|
from frame.eos import *
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
|
||||||
|
|
||||||
|
class TDTestCase(TBase):
|
||||||
|
"""Verify the case...when... expression in the query statement
|
||||||
|
"""
|
||||||
|
def init(self, conn, logSql, replicaVar=1):
|
||||||
|
self.replicaVar = int(replicaVar)
|
||||||
|
tdLog.debug("start to execute %s" % __file__)
|
||||||
|
tdSql.init(conn.cursor())
|
||||||
|
self.stable_schema = {
|
||||||
|
"columns": {
|
||||||
|
"ts": "timestamp",
|
||||||
|
"c_null": "int",
|
||||||
|
"c_bool": "bool",
|
||||||
|
"c_tinyint": "tinyint",
|
||||||
|
"c_smallint": "smallint",
|
||||||
|
"c_int": "int",
|
||||||
|
"c_bigint": "bigint",
|
||||||
|
"c_float": "float",
|
||||||
|
"c_double": "double",
|
||||||
|
"c_varchar": "varchar(16)",
|
||||||
|
"c_timestamp": "timestamp",
|
||||||
|
"c_nchar": "nchar(16)",
|
||||||
|
"c_utinyint": "tinyint unsigned",
|
||||||
|
"c_usmallint": "smallint unsigned",
|
||||||
|
"c_uint": "int unsigned",
|
||||||
|
"c_ubigint": "bigint unsigned",
|
||||||
|
"c_varbinary": "varbinary(16)",
|
||||||
|
"c_geometry": "geometry(32)"
|
||||||
|
},
|
||||||
|
"tags": {
|
||||||
|
"t_null": "int",
|
||||||
|
"t_bool": "bool",
|
||||||
|
"t_tinyint": "tinyint",
|
||||||
|
"t_smallint": "smallint",
|
||||||
|
"t_int": "int",
|
||||||
|
"t_bigint": "bigint",
|
||||||
|
"t_float": "float",
|
||||||
|
"t_double": "double",
|
||||||
|
"t_varchar": "varchar(16)",
|
||||||
|
"t_timestamp": "timestamp",
|
||||||
|
"t_nchar": "nchar(16)",
|
||||||
|
"t_utinyint": "tinyint unsigned",
|
||||||
|
"t_usmallint": "smallint unsigned",
|
||||||
|
"t_uint": "int unsigned",
|
||||||
|
"t_ubigint": "bigint unsigned",
|
||||||
|
"t_varbinary": "varbinary(16)",
|
||||||
|
"t_geometry": "geometry(32)"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def prepare_data(self):
|
||||||
|
# create database
|
||||||
|
tdSql.execute("create database test_case_when;")
|
||||||
|
tdSql.execute("use test_case_when;")
|
||||||
|
# create stable
|
||||||
|
columns = ",".join([f"{k} {v}" for k, v in self.stable_schema["columns"].items()])
|
||||||
|
tags = ",".join([f"{k} {v}" for k, v in self.stable_schema["tags"].items()])
|
||||||
|
st_sql = f"create stable st1 ({columns}) tags ({tags});"
|
||||||
|
tdSql.execute(st_sql)
|
||||||
|
st_sql_json_tag = f"create stable st2 ({columns}) tags (t json);"
|
||||||
|
tdSql.execute(st_sql_json_tag)
|
||||||
|
# create child table
|
||||||
|
tdSql.execute("create table ct1 using st1 tags(NULL, True, 1, 1, 1, 1, 1.1, 1.11, 'aaaaaaaa', '2021-09-01 00:00:00.000', 'aaaaaaaa', 1, 1, 1, 1, \"0x06\",'POINT(1 1)');")
|
||||||
|
tdSql.execute("""create table ct2 using st2 tags('{"name": "test", "location": "beijing"}');""")
|
||||||
|
# insert data
|
||||||
|
ct1_data = [
|
||||||
|
"""'2024-10-01 00:00:00.000', NULL, True, 2, 2, 2, 2, 2.2, 2.22, 'bbbbbbbb', '2021-09-01 00:00:00.000', 'bbbbbbbb', 2, 2, 2, 2, "0x07",'POINT(2 2)'""",
|
||||||
|
"""'2024-10-01 00:00:01.000', NULL, False, 3, 3, 3, 3, 3.3, 3.33, 'cccccccc', '2021-09-01 00:00:00.000', 'cccccccc', 3, 3, 3, 3, "0x08",'POINT(3 3)'""",
|
||||||
|
"""'2024-10-01 00:00:02.000', NULL, True, 4, 4, 4, 4, 4.4, 4.44, 'dddddddd', '2021-09-01 00:00:00.000', 'dddddddd', 4, 4, 4, 4, "0x09",'POINT(4 4)'""",
|
||||||
|
"""'2024-10-01 00:00:03.000', NULL, False, 5, 5, 5, 5, 5.5, 5.55, 'eeeeeeee', '2021-09-01 00:00:00.000', 'eeeeeeee', 5, 5, 5, 5, "0x0A",'POINT(5 5)'""",
|
||||||
|
"""'2024-10-01 00:00:04.000', NULL, True, 6, 6, 6, 6, 6.6, 6.66, 'ffffffff', '2021-09-01 00:00:00.000', 'ffffffff', 6, 6, 6, 6, "0x0B",'POINT(6 6)'""",
|
||||||
|
"""'2024-10-01 00:00:05.000', NULL, False, 7, 7, 7, 7, 7.7, 7.77, 'gggggggg', '2021-09-01 00:00:00.000', 'gggggggg', 7, 7, 7, 7, "0x0C",'POINT(7 7)'""",
|
||||||
|
"""'2024-10-01 00:00:06.000', NULL, True, 8, 8, 8, 8, 8.8, 8.88, 'hhhhhhhh', '2021-09-01 00:00:00.000', 'hhhhhhhh', 8, 8, 8, 8, "0x0D",'POINT(8 8)'""",
|
||||||
|
"""'2024-10-01 00:00:07.000', NULL, False, 9, 9, 9, 9, 9.9, 9.99, 'iiiiiiii', '2021-09-01 00:00:00.000', 'iiiiiiii', 9, 9, 9, 9, "0x0E",'POINT(9 9)'""",
|
||||||
|
"""'2024-10-01 00:00:08.000', NULL, True, 10, 10, 10, 10, 10.10, 10.1010, 'jjjjjjjj', '2021-09-01 00:00:00.000', 'jjjjjjjj', 10, 10, 10, 10, "0x0F",'POINT(10 10)'""",
|
||||||
|
"""'2024-10-01 00:00:09.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL"""
|
||||||
|
]
|
||||||
|
ct1_insert_sql = "insert into ct1 values(%s);" % "),(".join(ct1_data)
|
||||||
|
tdSql.execute(ct1_insert_sql)
|
||||||
|
ct2_data = [
|
||||||
|
"""'2024-10-01 00:00:00.000', NULL, True, 2, 2, 2, 2, 2.2, 2.22, 'bbbbbbbb', '2021-09-01 00:00:00.000', 'bbbbbbbb', 2, 2, 2, 2, "0x07",'POINT(2 2)'""",
|
||||||
|
"""'2024-10-01 00:00:01.000', NULL, False, 3, 3, 3, 3, 3.3, 3.33, 'cccccccc', '2021-09-01 00:00:00.000', 'cccccccc', 3, 3, 3, 3, "0x08",'POINT(3 3)'""",
|
||||||
|
"""'2024-10-01 00:00:02.000', NULL, True, 4, 4, 4, 4, 4.4, 4.44, 'dddddddd', '2021-09-01 00:00:00.000', 'dddddddd', 4, 4, 4, 4, "0x09",'POINT(4 4)'""",
|
||||||
|
"""'2024-10-01 00:00:03.000', NULL, False, 5, 5, 5, 5, 5.5, 5.55, 'eeeeeeee', '2021-09-01 00:00:00.000', 'eeeeeeee', 5, 5, 5, 5, "0x0A",'POINT(5 5)'""",
|
||||||
|
"""'2024-10-01 00:00:04.000', NULL, True, 6, 6, 6, 6, 6.6, 6.66, 'ffffffff', '2021-09-01 00:00:00.000', 'ffffffff', 6, 6, 6, 6, "0x0B",'POINT(6 6)'""",
|
||||||
|
"""'2024-10-01 00:00:05.000', NULL, False, 7, 7, 7, 7, 7.7, 7.77, 'gggggggg', '2021-09-01 00:00:00.000', 'gggggggg', 7, 7, 7, 7, "0x0C",'POINT(7 7)'""",
|
||||||
|
"""'2024-10-01 00:00:06.000', NULL, True, 8, 8, 8, 8, 8.8, 8.88, 'hhhhhhhh', '2021-09-01 00:00:00.000', 'hhhhhhhh', 8, 8, 8, 8, "0x0D",'POINT(8 8)'""",
|
||||||
|
"""'2024-10-01 00:00:07.000', NULL, False, 9, 9, 9, 9, 9.9, 9.99, 'iiiiiiii', '2021-09-01 00:00:00.000', 'iiiiiiii', 9, 9, 9, 9, "0x0E",'POINT(9 9)'""",
|
||||||
|
"""'2024-10-01 00:00:08.000', NULL, True, 10, 10, 10, 10, 10.10, 10.1010, 'jjjjjjjj', '2021-09-01 00:00:00.000', 'jjjjjjjj', 10, 10, 10, 10, "0x0F",'POINT(10 10)'""",
|
||||||
|
"""'2024-10-01 00:00:09.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL"""
|
||||||
|
]
|
||||||
|
ct2_insert_sql = "insert into ct2 values(%s);" % "),(".join(ct2_data)
|
||||||
|
tdSql.execute(ct2_insert_sql)

    def test_case_when_statements(self):
        tdSql.execute("use test_case_when;")

        tdSql.query("select case when c_null is null then c_null else t_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_null is not null then c_null else t_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_bool is null then c_bool else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_bool is not null then c_bool else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_tinyint is null then c_tinyint else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_tinyint is not null then c_tinyint else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_smallint is null then c_smallint else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_smallint is not null then c_smallint else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_int is null then c_int else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_int is not null then c_int else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_bigint is null then c_bigint else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_bigint is not null then c_bigint else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_float is null then c_float else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_float is not null then c_float else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('2.200000',), ('3.300000',), ('4.400000',), ('5.500000',), ('6.600000',), ('7.700000',), ('8.800000',), ('9.900000',), ('10.100000',), (None,)])

        tdSql.query("select case when c_double is null then c_double else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_double is not null then c_double else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('2.220000',), ('3.330000',), ('4.440000',), ('5.550000',), ('6.660000',), ('7.770000',), ('8.880000',), ('9.990000',), ('10.101000',), (None,)])

        tdSql.query("select case when c_varchar is null then c_varchar else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_varchar is not null then c_varchar else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)])

        tdSql.query("select case when c_nchar is null then c_nchar else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_nchar is not null then c_nchar else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)])

        tdSql.query("select case when c_utinyint is null then c_utinyint else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_utinyint is not null then c_utinyint else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_usmallint is null then c_usmallint else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_usmallint is not null then c_usmallint else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_uint is null then c_uint else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_uint is not null then c_uint else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_ubigint is null then c_ubigint else c_null end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_ubigint is not null then c_ubigint else c_null end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('2',), ('3',), ('4',), ('5',), ('6',), ('7',), ('8',), ('9',), ('10',), (None,)])

        tdSql.error("select case when c_varbinary is null then c_varbinary else c_null end from st1;")
        tdSql.error("select case when c_varbinary is not null then c_varbinary else c_null end from st1;")

        tdSql.query("select case when c_null is null then NULL else c_bool end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_null is not null then NULL else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(True,), (False,), (True,), (False,), (True,), (False,), (True,), (False,), (True,), (None,)])

        tdSql.query("select case when c_bool=true then NULL else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(None,), (False,), (None,), (False,), (None,), (False,), (None,), (False,), (None,), (None,)])

        tdSql.query("select case when c_bool!=true then NULL else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(True,), (None,), (True,), (None,), (True,), (None,), (True,), (None,), (True,), (None,)])

        tdSql.query("select case when c_tinyint=2 then c_tinyint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_tinyint!=2 then c_tinyint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_smallint=2 then c_smallint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_smallint!=2 then c_smallint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_int=2 then c_int else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_int!=2 then c_int else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_bigint=2 then c_bigint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_bigint!=2 then c_bigint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_float=2.2 then c_float else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res[1:] == [(0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (None,)])

        tdSql.query("select case when c_float!=2.2 then c_float else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res[0] == (1.0,))

        tdSql.query("select case when c_double=2.22 then c_double else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2.22,), (0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (None,)])

        tdSql.query("select case when c_double!=2.2 then c_double else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2.22,), (3.33,), (4.44,), (5.55,), (6.66,), (7.77,), (8.88,), (9.99,), (10.101,), (None,)])

        tdSql.query("select case when c_varchar='bbbbbbbb' then c_varchar else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), (None,)])

        tdSql.query("select case when c_varchar!='bbbbbbbb' then c_varchar else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('true',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)])

        tdSql.query("select case when c_timestamp='2021-09-01 00:00:00.000' then c_timestamp else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (None,)])

        tdSql.query("select case when c_timestamp!='2021-09-01 00:00:00.000' then c_timestamp else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_nchar='bbbbbbbb' then c_nchar else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), (None,)])

        tdSql.query("select case when c_nchar!='bbbbbbbb' then c_nchar else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('true',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)])

        tdSql.query("select case when c_utinyint=2 then c_utinyint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_utinyint!=2 then c_utinyint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_usmallint=2 then c_usmallint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_usmallint!=2 then c_usmallint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_uint=2 then c_uint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_uint!=2 then c_uint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_ubigint=2 then c_ubigint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_ubigint!=2 then c_ubigint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_ubigint=2 then c_ubigint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)])

        tdSql.query("select case when c_ubigint!=2 then c_ubigint else c_bool end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.error("select case when c_varbinary='\x30783037' then c_varbinary else c_bool end from st1;")
        tdSql.error("select case when c_varbinary!='\x30783037' then c_varbinary else c_bool end from st1;")

        tdSql.query("select case when c_null is null then NULL else c_tinyint end from st1;")
        assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res]))

        tdSql.query("select case when c_null is not null then NULL else c_tinyint end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_bool=true then false else c_tinyint end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(0,), (3,), (0,), (5,), (0,), (7,), (0,), (9,), (0,), (None,)])

        tdSql.query("select case when c_bool!=true then false else c_tinyint end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (4,), (0,), (6,), (0,), (8,), (0,), (10,), (None,)])

        tdSql.query("select case when c_smallint=2 then c_smallint else c_tinyint end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_smallint!=2 then c_smallint else c_tinyint end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_int=2 then c_smallint else c_int end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_int!=2 then c_smallint else c_int end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)])

        tdSql.query("select case when c_float=2.2 then 387897 else 'test message' end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('387897',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',)])

        tdSql.query("select case when c_double=2.22 then 387897 else 'test message' end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('387897',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',)])

        tdSql.query("select case when c_varchar='cccccccc' then 'test' when c_varchar='bbbbbbbb' then 'bbbb' else 'test message' end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('bbbb',), ('test',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',)])

        tdSql.query("select case when ts='2024-10-01 00:00:04.000' then 456646546 when ts>'2024-10-01 00:00:04.000' then 'after today' else 'before today or unknow date' end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('before today or unknow date',), ('before today or unknow date',), ('before today or unknow date',), ('before today or unknow date',), ('456646546',), ('after today',), ('after today',), ('after today',), ('after today',), ('after today',)])

        tdSql.error("select case when c_geometry is null then c_geometry else c_null end from st1;")
        tdSql.error("select case when c_geometry is not null then c_geometry else c_null end from st1;")
        tdSql.error("select case when c_geometry='POINT(2 2)' then c_geometry else c_bool end from st1;")
        tdSql.error("select case when c_geometry!='POINT(2 2)' then c_geometry else c_bool end from st1;")

        tdSql.error("select case when t is null then t else c_null end from st2;")
        tdSql.error("select case when t is not null then t else c_null end from st2;")
        tdSql.error("select case when t->'location'='beijing' then t->'location' else c_bool end from st2;")
        tdSql.error("select case when t->'location'!='beijing' then t->'location' else c_bool end from st1;")

        tdSql.query("select case when c_float!=2.2 then 387897 else 'test message' end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('test message',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('test message',)])

        tdSql.query("select case when c_double!=2.22 then 387897 else 'test message' end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('test message',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('test message',)])

        tdSql.query("select case c_tinyint when 2 then -2147483648 when 3 then 'three' else '4294967295' end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('-2147483648',), ('three',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',)])

        tdSql.query("select case c_float when 2.2 then 9.2233720e+18 when 3.3 then -9.2233720e+18 else 'aa' end from st1;")
        assert(tdSql.checkRows(10) and tdSql.res == [('9223372000000000000.000000',), ('-9223372000000000000.000000',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',)])

        tdSql.query("select case t1.c_int when 2 then 'run' when t1.c_int is null then 'other' else t2.c_varchar end from st1 t1, st2 t2 where t1.ts=t2.ts;")
        print(tdSql.res)
        assert(tdSql.checkRows(10) and tdSql.res == [('run',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)])

        tdSql.query("select avg(case when c_tinyint>=2 then c_tinyint else c_null end) from st1;")
        assert(tdSql.checkRows(1) and tdSql.res == [(6.0,)])

        tdSql.query("select sum(case when c_tinyint>=2 then c_tinyint else c_null end) from st1;")
        assert(tdSql.checkRows(1) and tdSql.res == [(54,)])

        tdSql.query("select first(case when c_int >=2 then 'abc' else 0 end) from st1;")
        assert(tdSql.checkRows(1) and tdSql.res == [('abc',)])

        tdSql.query("select last(case when c_int >=2 then c_int else 0 end) from st1;")
        assert(tdSql.checkRows(1) and tdSql.res == [(0,)])
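
        # The expected results above encode the type-resolution behaviour this
        # test pins down: a BOOL branch mixed with a numeric branch comes back
        # as 1/0, a numeric branch mixed with a string branch comes back as a
        # string, and a branch that is always NULL takes the type of the other
        # branch.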

    def run(self):
        self.prepare_data()
        self.test_case_when_statements()

    def stop(self):
        tdSql.execute("drop database test_case_when;")
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
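For reference, any one of the framework assertions above can be reproduced outside the harness. The snippet below is a minimal, hypothetical sketch: it assumes the `taos` Python connector (taospy) is installed, a local TDengine is running with its default root credentials, and the `test_case_when` database from this test still exists.

```
# Minimal sketch: re-run one CASE WHEN check without the test framework.
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
cur = conn.cursor()
cur.execute("use test_case_when")
cur.execute("select case when c_tinyint is not null then c_tinyint else c_null end from st1")
rows = cur.fetchall()
# Same shape the framework assert checks: 10 rows, NULL only in the last one.
assert len(rows) == 10 and rows[-1][0] is None
```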
@@ -22,6 +22,7 @@
 ,,y,army,./pytest.sh python3 ./test.py -f query/function/cast.py
 ,,y,army,./pytest.sh python3 ./test.py -f query/test_join.py
 ,,y,army,./pytest.sh python3 ./test.py -f query/test_compare.py
+,,y,army,./pytest.sh python3 ./test.py -f query/test_case_when.py
 ,,y,army,./pytest.sh python3 ./test.py -f insert/test_column_tag_boundary.py
 ,,y,army,./pytest.sh python3 ./test.py -f query/fill/fill_desc.py -N 3 -L 3 -D 2
 ,,y,army,./pytest.sh python3 ./test.py -f query/fill/fill_null.py
@@ -49,7 +50,7 @@
 ,,y,army,./pytest.sh python3 ./test.py -f query/window/base.py
 ,,y,army,./pytest.sh python3 ./test.py -f query/sys/tb_perf_queries_exist_test.py -N 3
 ,,y,army,./pytest.sh python3 ./test.py -f query/test_having.py
 ,,n,army,python3 ./test.py -f tmq/drop_lost_comsumers.py
 #
 # system test
 #
@@ -1321,6 +1322,7 @@
 ,,y,script,./test.sh -f tsim/stream/basic3.sim
 ,,y,script,./test.sh -f tsim/stream/basic4.sim
 ,,y,script,./test.sh -f tsim/stream/snodeCheck.sim
+,,y,script,./test.sh -f tsim/stream/concurrentcheckpt.sim
 ,,y,script,./test.sh -f tsim/stream/checkpointInterval0.sim
 ,,y,script,./test.sh -f tsim/stream/checkStreamSTable1.sim
 ,,y,script,./test.sh -f tsim/stream/checkStreamSTable.sim
@@ -6,15 +6,12 @@ import requests
 # -*- coding: utf-8 -*-
 import os ,sys
 import random
-import argparse
 import subprocess
-import time
-import platform
 
 # valgrind mode ?
 valgrind_mode = False
 
-msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" }
+msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4: "Invalid read/write", 5: "memory leak", 6: "dead locked"}
 
 # formal
 hostname = socket.gethostname()
@@ -112,9 +109,9 @@ def random_args(args_list):
     # args_list["--connector-type"]=connect_types[random.randint(0,2)]
     args_list["--connector-type"]= connect_types[0]
     args_list["--max-dbs"]= random.randint(1,10)
 
     # dnodes = [1,3] # set single dnodes;
 
     # args_list["--num-dnodes"]= random.sample(dnodes,1)[0]
     # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"])
     args_list["--debug"]=False
@@ -125,7 +122,7 @@ def random_args(args_list):
 
     # args_list["--ignore-errors"]=[] ## can add error codes for detail
 
 
     args_list["--run-tdengine"]= False
     args_list["--use-shadow-db"]= False
     args_list["--dynamic-db-table-names"]= True
@@ -162,7 +159,7 @@ def random_args(args_list):
     if args_list["--larger-data"]:
         threads = [16,32]
     else:
         threads = [32,64,128,256]
     args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug
 
     return args_list
@@ -176,7 +173,7 @@ def limits(args_list):
     pass
 
     # env is start by test frame , not crash_gen instance
 
     # elif args_list["--num-replicas"]==0:
     #     print(" make sure num-replicas is at least 1 ")
     #     args_list["--num-replicas"]=1
@@ -186,10 +183,10 @@ def limits(args_list):
     # elif args_list["--num-replicas"]>1:
     #     if not args_list["--auto-start-service"]:
     #         print("it should be deployed by crash_gen auto-start-service for multi replicas")
 
     # else:
     #     pass
 
     return args_list
 
 def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):
@@ -216,9 +213,9 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):
             arguments+=""
         else:
             arguments+=(k+"="+str(v)+" ")
 
     if valgrind :
 
         crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012 '%(crash_gen_path ,arguments)
 
     else:
@@ -239,7 +236,7 @@ def start_taosd():
     start_cmd = 'cd %s && python3 test.py >>/dev/null '%(start_path)
     os.system(start_cmd)
 
 def get_cmds(args_list):
     crash_gen_cmd = get_auto_mix_cmds(args_list,valgrind=valgrind_mode)
     return crash_gen_cmd
 
@@ -276,11 +273,15 @@ def check_status():
     os.system("tail -n 50 %s>>%s"%(result_file,exit_status_logs))
 
     core_check = subprocess.Popen('ls -l %s | grep "^-" | wc -l'%core_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
+    dead_lock_check = subprocess.Popen("grep -i 'dead locked' %s "%result_file, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
 
     if int(core_check.strip().rstrip()) > 0:
         # it means core files has occured
         return 3
 
+    if dead_lock_check:
+        return 6
+
     if "Crash_Gen is now exiting with status code: 1" in run_code:
         return 1
     elif "Crash_Gen is now exiting with status code: 0" in run_code:
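The added probe shells out to grep; for readers who prefer the logic spelled out, here is a pure-Python sketch of the same check (the helper name is mine, not the script's):

```
# Hypothetical helper equivalent to the added grep-based dead-lock probe:
# scan the captured run output for the case-insensitive "dead locked" marker.
def has_dead_lock(result_file: str) -> bool:
    with open(result_file, errors="ignore") as f:
        return any("dead locked" in line.lower() for line in f)
```

Ordering matters here: a core dump still wins (status 3), and the dead-lock probe runs before the generic Crash_Gen exit-code matching, which mirrors where the added lines sit in check_status().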
@@ -293,7 +294,7 @@ def main():
     args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[],
                  "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False,
                  "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False ,
                  "--continue-on-exception":False }
 
     args = random_args(args_list)
@@ -301,24 +302,24 @@ def main():
 
     build_path = get_path()
 
     if repo =="community":
         crash_gen_path = build_path[:-5]+"community/tests/pytest/"
     elif repo =="TDengine":
         crash_gen_path = build_path[:-5]+"/tests/pytest/"
     else:
         pass
 
     if os.path.exists(crash_gen_path+"crash_gen.sh"):
         print(" make sure crash_gen.sh is ready")
     else:
         print( " crash_gen.sh is not exists ")
         sys.exit(1)
 
     git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[7:16]
 
     # crash_cmds = get_cmds()
 
     crash_cmds = get_cmds(args)
     # clean run_dir
     os.system('rm -rf %s'%run_dir )
@@ -329,9 +330,9 @@ def main():
     run_crash_gen(crash_cmds)
     endtime = datetime.datetime.now()
     status = check_status()
 
     print("exit status : ", status)
 
     if status ==4:
         print('======== crash_gen found memory bugs ========')
     if status ==5:
@@ -344,15 +345,15 @@ def main():
     try:
         cmd = crash_cmds.split('&')[2]
         if status == 0:
             log_dir = "none"
         else:
             log_dir= "/root/pxiao/crash_gen_logs"
 
         if status == 3:
             core_dir = "/root/pxiao/crash_gen_logs"
         else:
             core_dir = "none"
 
         text = f'''
         exit status: {msg_dict[status]}
         test scope: crash_gen
@@ -364,12 +365,12 @@ def main():
         log dir: {log_dir}
         core dir: {core_dir}
         cmd: {cmd}'''
 
         send_msg(get_msg(text))
     except Exception as e:
         print("exception:", e)
     exit(status)
 
 
 if __name__ == '__main__':
     main()
@@ -9,15 +9,12 @@ import requests
 # -*- coding: utf-8 -*-
 import os ,sys
 import random
-import argparse
 import subprocess
-import time
-import platform
 
 # valgrind mode ?
 valgrind_mode = True
 
-msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" }
+msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4: "Invalid read/write", 5: "memory leak", 6: "dead locked"}
 
 # formal
 hostname = socket.gethostname()
@@ -48,6 +45,7 @@ def send_msg(json):
         'Content-Type': 'application/json'
     }
 
+
     req = requests.post(url=group_url, headers=headers, json=json)
     inf = req.json()
     if "StatusCode" in inf and inf["StatusCode"] == 0:
@@ -115,9 +113,9 @@ def random_args(args_list):
     # args_list["--connector-type"]=connect_types[random.randint(0,2)]
     args_list["--connector-type"]= connect_types[0]
     args_list["--max-dbs"]= random.randint(1,10)
 
     # dnodes = [1,3] # set single dnodes;
 
     # args_list["--num-dnodes"]= random.sample(dnodes,1)[0]
     # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"])
     args_list["--debug"]=False
@@ -125,13 +123,13 @@ def random_args(args_list):
     args_list["--track-memory-leaks"]=False
 
     args_list["--max-steps"]=random.randint(200,500)
 
     threads = [16,32]
 
     args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug
     # args_list["--ignore-errors"]=[] ## can add error codes for detail
 
 
     args_list["--run-tdengine"]= False
     args_list["--use-shadow-db"]= False
     args_list["--dynamic-db-table-names"]= True
@@ -177,7 +175,7 @@ def limits(args_list):
     pass
 
     # env is start by test frame , not crash_gen instance
 
     # elif args_list["--num-replicas"]==0:
     #     print(" make sure num-replicas is at least 1 ")
     #     args_list["--num-replicas"]=1
@@ -187,10 +185,10 @@ def limits(args_list):
     # elif args_list["--num-replicas"]>1:
     #     if not args_list["--auto-start-service"]:
     #         print("it should be deployed by crash_gen auto-start-service for multi replicas")
 
     # else:
     #     pass
 
     return args_list
 
 def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):
@@ -217,9 +215,9 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):
             arguments+=""
         else:
             arguments+=(k+"="+str(v)+" ")
 
     if valgrind :
 
         crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012 '%(crash_gen_path ,arguments)
 
     else:
@@ -228,7 +226,6 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):
 
     return crash_gen_cmd
 
-
 def start_taosd():
     build_path = get_path()
     if repo == "community":
@@ -272,7 +269,7 @@ def check_status():
     if int(core_check.strip().rstrip()) > 0:
         # it means core files has occured
         return 3
 
     mem_status = check_memory()
     if mem_status >0:
         return mem_status
@@ -281,8 +278,8 @@ def check_status():
     elif "Crash_Gen is now exiting with status code: 0" in run_code:
         return 0
     else:
         return 2
 
 
 def check_memory():
 
@@ -301,34 +298,37 @@ def check_memory():
         os.mkdir(back_path)
 
     stderr_file = os.path.join(crash_gen_path , "valgrind.err")
+    stdout_file = os.path.join(crash_gen_path, 'valgrind.out')
 
     status = 0
 
     grep_res = subprocess.Popen("grep -i 'Invalid read' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
 
     if grep_res:
         # os.system("cp %s %s"%(stderr_file , back_path))
         status = 4
 
     grep_res = subprocess.Popen("grep -i 'Invalid write' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
 
     if grep_res:
         # os.system("cp %s %s"%(stderr_file , back_path))
         status = 4
 
     grep_res = subprocess.Popen("grep -i 'taosMemoryMalloc' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
 
     if grep_res:
+        # a mem-leak report can also show up on a normal exit after a dead lock
         # os.system("cp %s %s"%(stderr_file , back_path))
-        status = 5
+        dead_lock_res = subprocess.Popen("grep -i 'dead locked' %s "%stdout_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
+        status = 6 if dead_lock_res else 5
 
     return status
 
 def main():
 
     args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[],
                  "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False,
                  "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False ,
                  "--continue-on-exception":False }
 
     args = random_args(args_list)
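Note the two capture files: valgrind's own memory report goes to `valgrind.err`, while the dead-lock marker lands in the run's stdout capture, `valgrind.out`, so the new probe reads the latter. The widened msg_dict is then consumed unchanged when the result is reported; a runnable sketch of just that mapping:

```
# Sketch: the exit message simply indexes msg_dict with the collected status.
# With the new entry, a dead-locked run reports as such instead of as a leak.
msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured",
            4: "Invalid read/write", 5: "memory leak", 6: "dead locked"}

for status in (5, 6):
    print(f"exit status: {msg_dict[status]}")
# -> exit status: memory leak
# -> exit status: dead locked
```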
@@ -341,17 +341,17 @@ def main():
         crash_gen_path = build_path[:-5]+"/tests/pytest/"
     else:
         pass
 
     if os.path.exists(crash_gen_path+"crash_gen.sh"):
         print(" make sure crash_gen.sh is ready")
     else:
         print( " crash_gen.sh is not exists ")
         sys.exit(1)
 
     git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[7:16]
 
     # crash_cmds = get_cmds()
 
     crash_cmds = get_cmds(args)
 
     # clean run_dir
@@ -364,9 +364,9 @@ def main():
     endtime = datetime.datetime.now()
     status = check_status()
     # back_path = os.path.join(core_path,"valgrind_report")
 
     print("exit status : ", status)
 
     if status ==4:
         print('======== crash_gen found memory bugs ========')
     if status ==5:
@@ -379,15 +379,15 @@ def main():
     try:
         cmd = crash_cmds.split('&')[2]
         if status == 0:
             log_dir = "none"
         else:
             log_dir= "/root/pxiao/crash_gen_logs"
 
         if status == 3:
             core_dir = "/root/pxiao/crash_gen_logs"
         else:
             core_dir = "none"
 
         text = f'''
         exit status: {msg_dict[status]}
         test scope: crash_gen
@@ -399,12 +399,12 @@ def main():
         log dir: {log_dir}
         core dir: {core_dir}
         cmd: {cmd}'''
 
         send_msg(get_msg(text))
     except Exception as e:
         print("exception:", e)
     exit(status)
 
 
 if __name__ == '__main__':
     main()
@@ -1,23 +1,17 @@
 #!/usr/bin/python3
+# -*- coding: utf-8 -*-
 
 import datetime
 import os
 import socket
 import requests
 
-# -*- coding: utf-8 -*-
 import os ,sys
 import random
-import argparse
 import subprocess
-import time
-import platform
 
 # valgrind mode ?
 valgrind_mode = True
 
-msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" }
+msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4: "Invalid read/write", 5: "memory leak", 6: "dead locked"}
 
 # formal
 hostname = socket.gethostname()
@@ -115,9 +109,9 @@ def random_args(args_list):
     # args_list["--connector-type"]=connect_types[random.randint(0,2)]
     args_list["--connector-type"]= connect_types[0]
     args_list["--max-dbs"]= random.randint(1,10)
 
     # dnodes = [1,3] # set single dnodes;
 
     # args_list["--num-dnodes"]= random.sample(dnodes,1)[0]
     # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"])
     args_list["--debug"]=False
@@ -125,13 +119,12 @@ def random_args(args_list):
     args_list["--track-memory-leaks"]=False
 
     args_list["--max-steps"]=random.randint(200,500)
 
     threads = [16,32]
 
     args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug
     # args_list["--ignore-errors"]=[] ## can add error codes for detail
 
-
     args_list["--run-tdengine"]= False
     args_list["--use-shadow-db"]= False
     args_list["--dynamic-db-table-names"]= True
@@ -177,7 +170,7 @@ def limits(args_list):
     pass
 
     # env is start by test frame , not crash_gen instance
 
     # elif args_list["--num-replicas"]==0:
     #     print(" make sure num-replicas is at least 1 ")
     #     args_list["--num-replicas"]=1
@@ -187,10 +180,9 @@ def limits(args_list):
     # elif args_list["--num-replicas"]>1:
     #     if not args_list["--auto-start-service"]:
     #         print("it should be deployed by crash_gen auto-start-service for multi replicas")
-
     # else:
     #     pass
 
     return args_list
 
 def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):
@@ -217,18 +209,13 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):
             arguments+=""
         else:
             arguments+=(k+"="+str(v)+" ")
 
     if valgrind :
-
         crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707,0x0203,0x4012 '%(crash_gen_path ,arguments)
-
     else:
-
         crash_gen_cmd = 'cd %s && ./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707,0x0203,0x4012'%(crash_gen_path ,arguments)
-
     return crash_gen_cmd
 
-
 def start_taosd():
     build_path = get_path()
     if repo == "community":
@@ -242,7 +229,7 @@ def start_taosd():
     os.system(start_cmd +">>/dev/null")
 
 def get_cmds(args_list):
 
     crash_gen_cmd = get_auto_mix_cmds(args_list,valgrind=valgrind_mode)
     return crash_gen_cmd
 
@@ -272,7 +259,7 @@ def check_status():
     if int(core_check.strip().rstrip()) > 0:
         # it means core files has occured
         return 3
 
     mem_status = check_memory()
     if mem_status >0:
         return mem_status
@@ -281,8 +268,7 @@ def check_status():
     elif "Crash_Gen is now exiting with status code: 0" in run_code:
         return 0
     else:
         return 2
 
-
 def check_memory():
@@ -301,57 +287,58 @@ def check_memory():
         os.mkdir(back_path)
 
     stderr_file = os.path.join(crash_gen_path , "valgrind.err")
+    stdout_file = os.path.join(crash_gen_path, 'valgrind.out')
     status = 0
 
     grep_res = subprocess.Popen("grep -i 'Invalid read' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
 
     if grep_res:
         # os.system("cp %s %s"%(stderr_file , back_path))
         status = 4
 
     grep_res = subprocess.Popen("grep -i 'Invalid write' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
 
     if grep_res:
         # os.system("cp %s %s"%(stderr_file , back_path))
         status = 4
 
     grep_res = subprocess.Popen("grep -i 'taosMemoryMalloc' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
 
     if grep_res:
+        # a mem-leak report can also show up on a normal exit after a dead lock
         # os.system("cp %s %s"%(stderr_file , back_path))
-        status = 5
+        dead_lock_res = subprocess.Popen("grep -i 'dead locked' %s "%stdout_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
+        status = 6 if dead_lock_res else 5
 
     return status
 
 def main():
 
     args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[],
                  "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False,
                  "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False ,
                  "--continue-on-exception":False }
 
     args = random_args(args_list)
     args = limits(args)
 
     build_path = get_path()
     if repo =="community":
         crash_gen_path = build_path[:-5]+"community/tests/pytest/"
     elif repo =="TDengine":
         crash_gen_path = build_path[:-5]+"/tests/pytest/"
     else:
         pass
 
     if os.path.exists(crash_gen_path+"crash_gen.sh"):
         print(" make sure crash_gen.sh is ready")
     else:
         print( " crash_gen.sh is not exists ")
         sys.exit(1)
 
     git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[7:16]
 
     # crash_cmds = get_cmds()
 
     crash_cmds = get_cmds(args)
 
     # clean run_dir
@@ -364,9 +351,9 @@ def main():
     endtime = datetime.datetime.now()
     status = check_status()
     # back_path = os.path.join(core_path,"valgrind_report")

     print("exit status : ", status)

     if status == 4:
         print('======== crash_gen found memory bugs ========')
     if status == 5:
@@ -379,15 +366,15 @@ def main():
     try:
         cmd = crash_cmds.split('&')[2]
         if status == 0:
             log_dir = "none"
         else:
             log_dir = "/root/pxiao/crash_gen_logs"

         if status == 3:
             core_dir = "/root/pxiao/crash_gen_logs"
         else:
             core_dir = "none"

         text = f'''
         exit status: {msg_dict[status]}
         test scope: crash_gen
@@ -399,12 +386,11 @@ def main():
         log dir: {log_dir}
         core dir: {core_dir}
         cmd: {cmd}'''

         send_msg(get_msg(text))
     except Exception as e:
         print("exception:", e)
     exit(status)


 if __name__ == '__main__':
     main()
-
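
The alert body above indexes `msg_dict` by exit status, so the new status 6 presumably needs its own entry. A hypothetical sketch of that mapping follows; only the semantics of 0, 3, 4, 5, and 6 can be inferred from this diff, and every string here is an assumption:

```python
# Hypothetical status-to-message mapping consumed by the alert text.
msg_dict = {
    0: "success",              # log_dir is reported as "none"
    3: "core dumped",          # core_dir points at the saved cores
    4: "memory bugs found",    # 'crash_gen found memory bugs' branch
    5: "memory leaks found",
    6: "dead lock found",      # new in this change
}

print("exit status : ", msg_dict[6])
```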
@@ -0,0 +1,79 @@
+system sh/stop_dnodes.sh
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c supportVnodes -v 1
+
+print ========== step1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+sql create database abc1 vgroups 1;
+sql use abc1;
+sql create table st1(ts timestamp, k int) tags(a int);
+sql create table t1 using st1 tags(1);
+sql create table t2 using st1 tags(2);
+sql insert into t1 values(now, 1);
+
+sql create stream str1 trigger at_once into str_dst1 as select count(*) from st1 interval(30s);
+sql create stream str2 trigger at_once into str_dst2 as select count(*) from st1 interval(30s);
+sql create stream str3 trigger at_once into str_dst3 as select count(*) from st1 interval(30s);
+
+print ============== created 3 streams, check the concurrent checkpoints
+sleep 180000
+
+sql select task_id, checkpoint_id from information_schema.ins_stream_tasks order by checkpoint_id;
+
+print $data01 $data11 $data21
+if $data01 == $data11 then
+  print not allowed: 2 checkpoints completed concurrently
+  return -1
+endi
+
+if $data11 == $data21 then
+  print not allowed: 2 checkpoints started concurrently
+  return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+
+print ========== concurrent checkpoint is set to 2
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c concurrentCheckpoint -v 2
+
+system sh/exec.sh -n dnode1 -s start
+
+print ========== step2
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+sql create database abc1 vgroups 1;
+sql use abc1;
+sql create table st1(ts timestamp, k int) tags(a int);
+sql create table t1 using st1 tags(1);
+sql create table t2 using st1 tags(2);
+sql insert into t1 values(now, 1);
+
+sql create stream str1 trigger at_once into str_dst1 as select count(*) from st1 interval(30s);
+sql create stream str2 trigger at_once into str_dst2 as select count(*) from st1 interval(30s);
+sql create stream str3 trigger at_once into str_dst3 as select count(*) from st1 interval(30s);
+
+print ============== created 3 streams, check the concurrent checkpoints
+sleep 180000
+
+sql select count(*) a, checkpoint_id from information_schema.ins_stream_tasks group by checkpoint_id order by a;
+print $data00 $data01
+print $data10 $data11
+
+if $data00 != 1 then
+  print expect 1, actual $data00
+  return -1
+endi
+
+if $data10 != 2 then
+  print expect 2, actual $data10
+  return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
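
The new sim test above asserts that, with the default single concurrent checkpoint, the three stream tasks never share a checkpoint_id. For readers outside the sim harness, roughly the same first assertion could be expressed with the Python connector; this is a sketch assuming taospy and a locally running dnode, and the sim script remains the test of record:

```python
import taos  # taospy connector (an assumption for this sketch)

conn = taos.connect()  # default host/port from the local taos.cfg
rows = conn.query(
    "select task_id, checkpoint_id from information_schema.ins_stream_tasks"
    " order by checkpoint_id").fetch_all()

# With concurrentCheckpoint at its default of 1, no two stream tasks
# should report the same checkpoint_id at the same time.
checkpoint_ids = [row[1] for row in rows]
assert len(checkpoint_ids) == len(set(checkpoint_ids)), \
    "two checkpoints started concurrently"
```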
@@ -111,6 +111,7 @@ run tsim/stream/distributeInterval0.sim
 run tsim/stream/distributeSession0.sim
 run tsim/stream/state0.sim
 run tsim/stream/basic2.sim
+run tsim/stream/concurrentcheckpt.sim
 run tsim/insert/basic1.sim
 run tsim/insert/commit-merge0.sim
 run tsim/insert/basic0.sim
@@ -39,7 +39,7 @@ class TDTestCase:
         taos_list = ['server','client']
         for i in taos_list:
             tdSql.query(f'select {i}_version()')
-            version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char version"', shell=True, capture_output=True).stdout.decode('utf8')).split('"')[1]
+            version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char td_version"', shell=True, capture_output=True).stdout.decode('utf8')).split('"')[1]
             tdSql.checkData(0,0,version_info)

     def get_server_status(self):
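
The only change here is the grep pattern tracking the symbol rename in version.c; the quoted value is still pulled out with `split('"')[1]`. A tiny sketch of that parse follows; the declaration line is a placeholder, not the real contents of version.c:

```python
# Placeholder for the line the updated grep would match in version.c.
line = 'char td_version[64] = "3.3.0.0";'

# Same extraction idiom as the test: take the first double-quoted field.
version_info = line.split('"')[1]
assert version_info == "3.3.0.0"
```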
@@ -102,7 +102,7 @@ void shellPrintHelp() {
 #include <termio.h>
 #endif

-const char *argp_program_version = version;
+const char *argp_program_version = td_version;
 #ifdef CUS_EMAIL
 const char *argp_program_bug_address = CUS_EMAIL;
 #else
@@ -440,11 +440,11 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) {
 #ifdef TD_ENTERPRISE
   snprintf(shell.info.programVersion, sizeof(shell.info.programVersion),
            "%s\n%s version: %s compatible_version: %s\ngit: %s\ngitOfInternal: %s\nbuild: %s", TD_PRODUCT_NAME,
-           CUS_PROMPT, version, compatible_version, gitinfo, gitinfoOfInternal, buildinfo);
+           CUS_PROMPT, td_version, td_compatible_version, td_gitinfo, td_gitinfoOfInternal, td_buildinfo);
 #else
   snprintf(shell.info.programVersion, sizeof(shell.info.programVersion),
-           "%s\n%s version: %s compatible_version: %s\ngit: %s\nbuild: %s", TD_PRODUCT_NAME, CUS_PROMPT, version,
-           compatible_version, gitinfo, buildinfo);
+           "%s\n%s version: %s compatible_version: %s\ngit: %s\nbuild: %s", TD_PRODUCT_NAME, CUS_PROMPT, td_version,
+           td_compatible_version, td_gitinfo, td_buildinfo);
 #endif

 #if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
@@ -34,7 +34,7 @@ static void shellWorkAsClient() {
   rpcInit.user = "_dnd";
   rpcInit.timeToGetConn = tsTimeToGetAvailableConn;

-  taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
+  taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);
   clientRpc = rpcOpen(&rpcInit);
   if (clientRpc == NULL) {
     printf("failed to init net test client since %s\r\n", terrstr());
@@ -125,7 +125,7 @@ static void shellWorkAsServer() {
   rpcInit.connType = TAOS_CONN_SERVER;
   rpcInit.idleTime = tsShellActivityTimer * 1000;

-  taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
+  taosVersionStrToInt(td_version, &rpcInit.compatibilityVer);

   void *serverRpc = rpcOpen(&rpcInit);
   if (serverRpc == NULL) {
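
All of the shell changes above are mechanical renames (version to td_version and friends), so the printed banner should be unchanged. One quick smoke check, assuming a built `taos` binary on PATH and that the `-V` flag prints the version banner assembled in shellParseArgs (both are assumptions of this sketch, not asserted by the diff):

```python
import subprocess

# Run the shell with its version flag and show the banner it reports.
out = subprocess.run(["taos", "-V"], capture_output=True, text=True).stdout
print(out)  # expect "version: ..." and "compatible_version: ..." fields
```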