Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/TD-22023

commit bc4f3d1296
@@ -217,6 +217,7 @@
 TD_DEF_MSG_TYPE(TDMT_MND_VIEW_META, "view-meta", NULL, NULL)
 TD_DEF_MSG_TYPE(TDMT_MND_KILL_COMPACT, "kill-compact", SKillCompactReq, NULL)
 TD_DEF_MSG_TYPE(TDMT_MND_COMPACT_TIMER, "compact-tmr", NULL, NULL)
+TD_DEF_MSG_TYPE(TDMT_MND_STREAM_REQ_CHKPT, "stream-req-checkpoint", NULL, NULL)
 TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
 TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG)
 
@@ -301,7 +302,6 @@
 TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_PAUSE, "stream-task-pause", NULL, NULL)
 TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESUME, "stream-task-resume", NULL, NULL)
 TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_STOP, "stream-task-stop", NULL, NULL)
-TD_DEF_MSG_TYPE(TDMT_STREAM_HTASK_DROP, "stream-htask-drop", NULL, NULL)
 TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
 TD_CLOSE_MSG_SEG(TDMT_END_STREAM_MSG)
 
@@ -462,7 +462,6 @@ struct SStreamTask {
   struct SStreamMeta* pMeta;
   SSHashObj*          pNameMap;
   void*               pBackend;
-  int64_t             backendRefId;
   char                reserve[256];
 };
 
@@ -640,6 +639,7 @@ typedef struct {
 int32_t tEncodeStreamScanHistoryFinishReq(SEncoder* pEncoder, const SStreamScanHistoryFinishReq* pReq);
 int32_t tDecodeStreamScanHistoryFinishReq(SDecoder* pDecoder, SStreamScanHistoryFinishReq* pReq);
 
+// mndTrigger: denotes whether this checkpoint is triggered by the mnode, or requested by tasks once transfer-state has finished
 typedef struct {
   int64_t streamId;
   int64_t checkpointId;
@@ -648,6 +648,7 @@ typedef struct {
   SEpSet  mgmtEps;
   int32_t mnodeId;
   int32_t transId;
+  int8_t  mndTrigger;
   int64_t expireTime;
 } SStreamCheckpointSourceReq;
 
@@ -770,6 +771,15 @@ int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
 void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
 void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
 
+typedef struct SStreamTaskCheckpointReq {
+  int64_t streamId;
+  int32_t taskId;
+  int32_t nodeId;
+} SStreamTaskCheckpointReq;
+
+int32_t tEncodeStreamTaskCheckpointReq(SEncoder* pEncoder, const SStreamTaskCheckpointReq* pReq);
+int32_t tDecodeStreamTaskCheckpointReq(SDecoder* pDecoder, SStreamTaskCheckpointReq* pReq);
+
 int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
 
 int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
@@ -792,6 +802,7 @@ SStreamTaskState* streamTaskGetStatus(const SStreamTask* pTask);
 const char* streamTaskGetStatusStr(ETaskStatus status);
 void streamTaskResetStatus(SStreamTask* pTask);
 void streamTaskSetStatusReady(SStreamTask* pTask);
+ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask);
 
 void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen);
 
@@ -806,7 +817,7 @@ bool streamTaskIsAllUpstreamClosed(SStreamTask* pTask);
 bool streamTaskSetSchedStatusWait(SStreamTask* pTask);
 int8_t streamTaskSetSchedStatusActive(SStreamTask* pTask);
 int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask);
-int32_t streamTaskClearHTaskAttr(SStreamTask* pTask);
+int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, bool metaLock);
 
 int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event);
 int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, void* pFn);
@@ -839,6 +850,7 @@ void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
 void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
 int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key);
 bool streamTaskIsSinkTask(const SStreamTask* pTask);
+int32_t streamTaskSendCheckpointReq(SStreamTask* pTask);
 
 void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask);
 void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
@@ -866,6 +878,7 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
 SStreamTask* streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
 SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
 void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
+SStreamTask* streamMetaAcquireOneTask(SStreamTask* pTask);
 void streamMetaClear(SStreamMeta* pMeta);
 void streamMetaInitBackend(SStreamMeta* pMeta);
 int32_t streamMetaCommit(SStreamMeta* pMeta);
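The encoder/decoder pair declared above for the new SStreamTaskCheckpointReq is implemented elsewhere in the commit (not shown in this excerpt). A minimal sketch of what such bodies typically look like in this codebase, assuming the usual tStartEncode/tEndEncode field helpers used by the other stream requests:

// Sketch only: hypothetical bodies for the declared pair, modeled on the
// tEncode/tDecode helpers used by the other stream requests in this header.
int32_t tEncodeStreamTaskCheckpointReq(SEncoder* pEncoder, const SStreamTaskCheckpointReq* pReq) {
  if (tStartEncode(pEncoder) < 0) return -1;
  if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
  if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
  if (tEncodeI32(pEncoder, pReq->nodeId) < 0) return -1;
  tEndEncode(pEncoder);
  return 0;
}

int32_t tDecodeStreamTaskCheckpointReq(SDecoder* pDecoder, SStreamTaskCheckpointReq* pReq) {
  if (tStartDecode(pDecoder) < 0) return -1;
  if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->nodeId) < 0) return -1;
  tEndDecode(pDecoder);
  return 0;
}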
@@ -223,6 +223,7 @@ SArray *mmGetMsgHandles() {
   if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_UPDATE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_RESET_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_HEARTBEAT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
+  if (dmSetMgmtHandle(pArray, TDMT_MND_STREAM_REQ_CHKPT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_KILL_COMPACT_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
 
   if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -84,7 +84,6 @@ SArray *smGetMsgHandles() {
   if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_PAUSE, smPutNodeMsgToMgmtQueue, 1) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RESUME, smPutNodeMsgToMgmtQueue, 1) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_STOP, smPutNodeMsgToMgmtQueue, 1) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_STREAM_HTASK_DROP, smPutNodeMsgToMgmtQueue, 1) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_CHECK_RSP, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_SCAN_HISTORY_FINISH, smPutNodeMsgToStreamQueue, 1) == NULL) goto _OVER;
@@ -835,7 +835,6 @@ SArray *vmGetMsgHandles() {
   if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_PAUSE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RESUME, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_STOP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
-  if (dmSetMgmtHandle(pArray, TDMT_STREAM_HTASK_DROP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_CHECK_POINT_SOURCE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECKPOINT_READY, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
   if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_UPDATE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
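The three handler-table changes above route the new TDMT_MND_STREAM_REQ_CHKPT message into the mnode write queue and drop the TDMT_STREAM_HTASK_DROP routes from the snode and vnode. The producer side is streamTaskSendCheckpointReq, declared earlier; a hedged sketch of such a sender, assuming the task carries an mnode epset (the info.mnodeEpset field name below is an assumption, not taken from this excerpt):

// Sketch: serialize a SStreamTaskCheckpointReq and post it to the mnode.
// pTask->info.mnodeEpset is an assumed field; error handling is abbreviated.
int32_t streamTaskSendCheckpointReq(SStreamTask* pTask) {
  SStreamTaskCheckpointReq req = {
      .streamId = pTask->id.streamId, .taskId = pTask->id.taskId, .nodeId = pTask->info.nodeId};

  int32_t tlen = 0;
  int32_t code = 0;
  tEncodeSize(tEncodeStreamTaskCheckpointReq, &req, tlen, code);
  if (code < 0) return -1;

  void* buf = rpcMallocCont(tlen);
  if (buf == NULL) return -1;

  SEncoder encoder;
  tEncoderInit(&encoder, buf, tlen);
  tEncodeStreamTaskCheckpointReq(&encoder, &req);
  tEncoderClear(&encoder);

  SRpcMsg msg = {0};
  initRpcMsg(&msg, TDMT_MND_STREAM_REQ_CHKPT, buf, tlen);
  return tmsgSendReq(&pTask->info.mnodeEpset, &msg);  // epset field assumed
}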
@@ -17,11 +17,15 @@
 #define _TD_MND_STREAM_H_
 
 #include "mndInt.h"
+#include "mndTrans.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
+#define MND_STREAM_RESERVE_SIZE 64
+#define MND_STREAM_VER_NUMBER   4
+
 typedef struct SStreamTransInfo {
   int64_t startTime;
   int64_t streamUid;
@@ -50,8 +54,22 @@ typedef struct SStreamExecInfo {
   SHashObj      *pTaskMap;
   SArray        *pTaskList;
   TdThreadMutex  lock;
+  SHashObj      *pTransferStateStreams;
 } SStreamExecInfo;
 
+typedef struct SNodeEntry {
+  int32_t nodeId;
+  bool    stageUpdated;  // the stage has been updated due to a leader/follower change or node reboot
+  SEpSet  epset;         // compare the epsets to identify vgroups transferring between different dnodes
+  int64_t hbTimestamp;   // second
+} SNodeEntry;
+
+typedef struct SFailedCheckpointInfo {
+  int64_t streamUid;
+  int64_t checkpointId;
+  int32_t transId;
+} SFailedCheckpointInfo;
+
 #define MND_STREAM_CREATE_NAME     "stream-create"
 #define MND_STREAM_CHECKPOINT_NAME "stream-checkpoint"
 #define MND_STREAM_PAUSE_NAME      "stream-pause"
@@ -67,7 +85,7 @@ void mndCleanupStream(SMnode *pMnode);
 SStreamObj *mndAcquireStream(SMnode *pMnode, char *streamName);
 void mndReleaseStream(SMnode *pMnode, SStreamObj *pStream);
 int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
-int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
+int32_t mndPersistStream(STrans *pTrans, SStreamObj *pStream);
 
 int32_t mndStreamRegisterTrans(STrans* pTrans, const char* pTransName, int64_t streamUid);
 int32_t mndAddtoCheckpointWaitingList(SStreamObj *pStream, int64_t checkpointId);
@@ -80,6 +98,21 @@ int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
 int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
 
 int32_t mndGetNumOfStreams(SMnode *pMnode, char *dbName, int32_t *pNumOfStreams);
+int32_t mndGetNumOfStreamTasks(const SStreamObj *pStream);
+SArray *mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady);
+void mndKillTransImpl(SMnode *pMnode, int32_t transId, const char *pDbName);
+void initTransAction(STransAction *pAction, void *pCont, int32_t contLen, int32_t msgType, const SEpSet *pEpset,
+                     int32_t retryCode);
+STrans *doCreateTrans(SMnode *pMnode, SStreamObj *pStream, SRpcMsg *pReq, const char *name, const char *pMsg);
+int32_t mndPersistTransLog(SStreamObj *pStream, STrans *pTrans, int32_t status);
+SSdbRaw *mndStreamActionEncode(SStreamObj *pStream);
+SStreamObj *mndGetStreamObj(SMnode *pMnode, int64_t streamId);
+int32_t extractNodeEpset(SMnode *pMnode, SEpSet *pEpSet, bool *hasEpset, int32_t taskId, int32_t nodeId);
+int32_t mndProcessStreamHb(SRpcMsg *pReq);
+void saveStreamTasksInfo(SStreamObj *pStream, SStreamExecInfo *pExecNode);
+int32_t initStreamNodeList(SMnode *pMnode);
+int32_t mndResumeStreamTasks(STrans *pTrans, SMnode *pMnode, SStreamObj *pStream, int8_t igUntreated);
+int32_t mndPauseStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
 
 #ifdef __cplusplus
 }
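Taken together, the helpers newly exported above define the mnode's recipe for building a stream transaction; createStreamResetStatusTrans later in this commit is a concrete instance. A condensed sketch of that flow, where buildStreamTrans is a hypothetical name and pReq/contLen and the task/node ids stand for caller-supplied values:

// Sketch of the trans-building flow the new exports support.
static int32_t buildStreamTrans(SMnode *pMnode, SStreamObj *pStream, void *pReq, int32_t contLen, int32_t taskId,
                                int32_t nodeId) {
  STrans *pTrans = doCreateTrans(pMnode, pStream, NULL, MND_STREAM_PAUSE_NAME, "pause the stream");
  if (pTrans == NULL) return terrno;

  SEpSet epset = {0};
  bool   hasEpset = false;
  if (extractNodeEpset(pMnode, &epset, &hasEpset, taskId, nodeId) == TSDB_CODE_SUCCESS && hasEpset) {
    STransAction action = {0};
    initTransAction(&action, pReq, contLen, TDMT_STREAM_TASK_PAUSE, &epset, 0);
    if (mndTransAppendRedoAction(pTrans, &action) != 0) {
      mndTransDrop(pTrans);
      return -1;
    }
  }

  if (mndPersistTransLog(pStream, pTrans, SDB_STATUS_READY) != 0) return -1;  // helper drops pTrans on failure
  if (mndTransPrepare(pMnode, pTrans) != 0) {
    mndTransDrop(pTrans);
    return -1;
  }
  mndTransDrop(pTrans);
  return 0;
}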
@@ -17,6 +17,8 @@
 #include "mndDef.h"
 #include "mndConsumer.h"
 
+static void *freeStreamTasks(SArray *pTaskLevel);
+
 int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) {
   if (tStartEncode(pEncoder) < 0) return -1;
   if (tEncodeCStr(pEncoder, pObj->name) < 0) return -1;
@@ -121,11 +123,18 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) {
   if (tDecodeCStrAlloc(pDecoder, &pObj->ast) < 0) return -1;
   if (tDecodeCStrAlloc(pDecoder, &pObj->physicalPlan) < 0) return -1;
 
-  pObj->tasks = NULL;
+  if (pObj->tasks != NULL) {
+    pObj->tasks = freeStreamTasks(pObj->tasks);
+  }
+
   int32_t sz;
-  if (tDecodeI32(pDecoder, &sz) < 0) return -1;
+  if (tDecodeI32(pDecoder, &sz) < 0) {
+    return -1;
+  }
+
   if (sz != 0) {
     pObj->tasks = taosArrayInit(sz, sizeof(void *));
+
     for (int32_t i = 0; i < sz; i++) {
       int32_t innerSz;
       if (tDecodeI32(pDecoder, &innerSz) < 0) return -1;
@@ -165,8 +174,9 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) {
   return 0;
 }
+
-static void *freeStreamTasks(SArray *pTaskLevel) {
+void *freeStreamTasks(SArray *pTaskLevel) {
   int32_t numOfLevel = taosArrayGetSize(pTaskLevel);
 
   for (int32_t i = 0; i < numOfLevel; i++) {
     SArray *pLevel = taosArrayGetP(pTaskLevel, i);
     int32_t taskSz = taosArrayGetSize(pLevel);
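The decode change above replaces the old unconditional pObj->tasks = NULL with a free of any tasks already attached, which matters when sdb decodes into a reused object. A contrived illustration of the leak the guard prevents, with the decoder setup and the sver version argument elided:

// Contrived sketch: decoding twice into one object (buffers/setup elided).
SDecoder   decoder1, decoder2;  // assume both initialized over valid encoded rows
SStreamObj obj = {0};
tDecodeSStreamObj(&decoder1, &obj, sver);  // allocates obj.tasks
tDecodeSStreamObj(&decoder2, &obj, sver);  // previously leaked the first tasks arrays;
                                           // now freeStreamTasks releases them first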
@@ -767,7 +767,7 @@ _OVER:
       pMsg->msgType == TDMT_MND_TRIM_DB_TIMER || pMsg->msgType == TDMT_MND_UPTIME_TIMER ||
       pMsg->msgType == TDMT_MND_COMPACT_TIMER || pMsg->msgType == TDMT_MND_NODECHECK_TIMER ||
       pMsg->msgType == TDMT_MND_GRANT_HB_TIMER || pMsg->msgType == TDMT_MND_STREAM_CHECKPOINT_CANDIDITATE ||
-      pMsg->msgType == TDMT_MND_STREAM_CHECKPOINT_TIMER) {
+      pMsg->msgType == TDMT_MND_STREAM_CHECKPOINT_TIMER || pMsg->msgType == TDMT_MND_STREAM_REQ_CHKPT) {
     mTrace("timer not process since mnode restored:%d stopped:%d, sync restored:%d role:%s ", pMnode->restored,
            pMnode->stopped, state.restored, syncStr(state.state));
     return -1;
@@ -639,7 +639,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
   if (mndSetUpdateSmaStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
   if (mndSetCreateSmaVgroupRedoActions(pMnode, pTrans, pDb, &streamObj.fixedSinkVg, &smaObj) != 0) goto _OVER;
   if (mndScheduleStream(pMnode, &streamObj, 1685959190000) != 0) goto _OVER;
-  if (mndPersistStream(pMnode, pTrans, &streamObj) != 0) goto _OVER;
+  if (mndPersistStream(pTrans, &streamObj) != 0) goto _OVER;
   if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
 
   mInfo("sma:%s, uid:%" PRIi64 " create on stb:%" PRIi64 ", dstSuid:%" PRIi64 " dstTb:%s dstVg:%d", pCreate->name,
@@ -872,7 +872,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
   }
 
   // drop stream
-  if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) {
+  if (mndPersistTransLog(pStream, pTrans, SDB_STATUS_DROPPED) < 0) {
     mError("stream:%s, failed to drop log since %s", pStream->name, terrstr());
     sdbRelease(pMnode->pSdb, pStream);
     goto _OVER;
@@ -923,7 +923,7 @@ int32_t mndDropSmasByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p
       goto _OVER;
     }
 
-    if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) {
+    if (mndPersistTransLog(pStream, pTrans, SDB_STATUS_DROPPED) < 0) {
       mndReleaseStream(pMnode, pStream);
       goto _OVER;
     }
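Both mndSma.c call sites above move from the stream-specific mndPersistDropStreamLog to the generic mndPersistTransLog, defined later in this commit; its third argument is the sdb status the encoded raw should carry once the trans commits. One contract worth noting from that definition: on failure the helper drops the trans itself, so callers only propagate the error.

// Sketch of the caller-side contract: SDB_STATUS_DROPPED marks the stream's
// record for removal; on failure mndPersistTransLog has already dropped pTrans.
if (mndPersistTransLog(pStream, pTrans, SDB_STATUS_DROPPED) < 0) {
  goto _OVER;  // do not call mndTransDrop(pTrans) again here
}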
(One file's diff is suppressed because it is too large.)
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mndStream.h"
+#include "mndTrans.h"
+
+static void doExtractTasksFromStream(SMnode *pMnode) {
+  SSdb       *pSdb = pMnode->pSdb;
+  SStreamObj *pStream = NULL;
+  void       *pIter = NULL;
+
+  while (1) {
+    pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
+    if (pIter == NULL) {
+      break;
+    }
+
+    saveStreamTasksInfo(pStream, &execInfo);
+    sdbRelease(pSdb, pStream);
+  }
+}
+
+static void updateStageInfo(STaskStatusEntry *pTaskEntry, int64_t stage) {
+  int32_t numOfNodes = taosArrayGetSize(execInfo.pNodeList);
+  for (int32_t j = 0; j < numOfNodes; ++j) {
+    SNodeEntry *pNodeEntry = taosArrayGet(execInfo.pNodeList, j);
+    if (pNodeEntry->nodeId == pTaskEntry->nodeId) {
+      mInfo("vgId:%d stage updated from %" PRId64 " to %" PRId64 ", nodeUpdate triggered by s-task:0x%" PRIx64,
+            pTaskEntry->nodeId, pTaskEntry->stage, stage, pTaskEntry->id.taskId);
+
+      pNodeEntry->stageUpdated = true;
+      pTaskEntry->stage = stage;
+      break;
+    }
+  }
+}
+
+static void addIntoCheckpointList(SArray *pList, const SFailedCheckpointInfo *pInfo) {
+  int32_t num = taosArrayGetSize(pList);
+  for (int32_t i = 0; i < num; ++i) {
+    SFailedCheckpointInfo *p = taosArrayGet(pList, i);
+    if (p->transId == pInfo->transId) {
+      return;
+    }
+  }
+
+  taosArrayPush(pList, pInfo);
+}
+
+static int32_t createStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) {
+  STrans *pTrans = doCreateTrans(pMnode, pStream, NULL, MND_STREAM_TASK_RESET_NAME, " reset from failed checkpoint");
+  if (pTrans == NULL) {
+    return terrno;
+  }
+
+  taosWLockLatch(&pStream->lock);
+  int32_t numOfLevels = taosArrayGetSize(pStream->tasks);
+
+  for (int32_t j = 0; j < numOfLevels; ++j) {
+    SArray *pLevel = taosArrayGetP(pStream->tasks, j);
+
+    int32_t numOfTasks = taosArrayGetSize(pLevel);
+    for (int32_t k = 0; k < numOfTasks; ++k) {
+      SStreamTask *pTask = taosArrayGetP(pLevel, k);
+
+      // todo extract method, with pause stream task
+      SVResetStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVResetStreamTaskReq));
+      if (pReq == NULL) {
+        terrno = TSDB_CODE_OUT_OF_MEMORY;
+        mError("failed to malloc in reset stream, size:%" PRIzu ", code:%s", sizeof(SVResetStreamTaskReq),
+               tstrerror(TSDB_CODE_OUT_OF_MEMORY));
+        taosWUnLockLatch(&pStream->lock);
+        return terrno;
+      }
+
+      pReq->head.vgId = htonl(pTask->info.nodeId);
+      pReq->taskId = pTask->id.taskId;
+      pReq->streamId = pTask->id.streamId;
+
+      SEpSet  epset = {0};
+      bool    hasEpset = false;
+      int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId);
+      if (code != TSDB_CODE_SUCCESS) {
+        taosMemoryFree(pReq);
+        continue;
+      }
+
+      if (!hasEpset) {
+        taosMemoryFree(pReq);
+        continue;
+      }
+
+      STransAction action = {0};
+      initTransAction(&action, pReq, sizeof(SVResetStreamTaskReq), TDMT_VND_STREAM_TASK_RESET, &epset, 0);
+      if (mndTransAppendRedoAction(pTrans, &action) != 0) {
+        taosMemoryFree(pReq);
+        taosWUnLockLatch(&pStream->lock);
+        mndTransDrop(pTrans);
+        return terrno;
+      }
+    }
+  }
+
+  taosWUnLockLatch(&pStream->lock);
+
+  int32_t code = mndPersistTransLog(pStream, pTrans, SDB_STATUS_READY);
+  if (code != TSDB_CODE_SUCCESS) {
+    sdbRelease(pMnode->pSdb, pStream);
+    return -1;
+  }
+
+  if (mndTransPrepare(pMnode, pTrans) != 0) {
+    mError("trans:%d, failed to prepare update stream trans since %s", pTrans->id, terrstr());
+    sdbRelease(pMnode->pSdb, pStream);
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  sdbRelease(pMnode->pSdb, pStream);
+  mndTransDrop(pTrans);
+
+  return TSDB_CODE_ACTION_IN_PROGRESS;
+}
+
+static int32_t mndResetStatusFromCheckpoint(SMnode *pMnode, int64_t streamId, int32_t transId) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  mndKillTransImpl(pMnode, transId, "");
+
+  SStreamObj *pStream = mndGetStreamObj(pMnode, streamId);
+  if (pStream == NULL) {
+    code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
+    mError("failed to acquire the streamObj:0x%" PRIx64 " to reset checkpoint, may have been dropped", streamId);
+  } else {
+    bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_TASK_RESET_NAME, false);
+    if (conflict) {
+      mError("stream:%s other trans exists in DB:%s, dstTable:%s failed to start reset-status trans", pStream->name,
+             pStream->sourceDb, pStream->targetSTbName);
+    } else {
+      mDebug("stream:%s (0x%" PRIx64 ") reset checkpoint procedure, transId:%d, create reset trans", pStream->name,
+             pStream->uid, transId);
+      code = createStreamResetStatusTrans(pMnode, pStream);
+    }
+  }
+
+  mndReleaseStream(pMnode, pStream);
+  return code;
+}
+
+static int32_t setNodeEpsetExpiredFlag(const SArray *pNodeList) {
+  int32_t num = taosArrayGetSize(pNodeList);
+  mInfo("set node expired for %d nodes", num);
+
+  for (int k = 0; k < num; ++k) {
+    int32_t *pVgId = taosArrayGet(pNodeList, k);
+    mInfo("set node expired for nodeId:%d, total:%d", *pVgId, num);
+
+    int32_t numOfNodes = taosArrayGetSize(execInfo.pNodeList);
+    for (int i = 0; i < numOfNodes; ++i) {
+      SNodeEntry *pNodeEntry = taosArrayGet(execInfo.pNodeList, i);
+
+      if (pNodeEntry->nodeId == *pVgId) {
+        mInfo("vgId:%d expired for some stream tasks, needs update nodeEp", *pVgId);
+        pNodeEntry->stageUpdated = true;
+        break;
+      }
+    }
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t mndProcessStreamHb(SRpcMsg *pReq) {
+  SMnode      *pMnode = pReq->info.node;
+  SStreamHbMsg req = {0};
+  SArray      *pList = taosArrayInit(4, sizeof(SFailedCheckpointInfo));
+
+  SDecoder decoder = {0};
+  tDecoderInit(&decoder, pReq->pCont, pReq->contLen);
+
+  if (tDecodeStreamHbMsg(&decoder, &req) < 0) {
+    streamMetaClearHbMsg(&req);
+    tDecoderClear(&decoder);
+    terrno = TSDB_CODE_INVALID_MSG;
+    return -1;
+  }
+  tDecoderClear(&decoder);
+
+  mTrace("receive stream-meta hb from vgId:%d, active numOfTasks:%d", req.vgId, req.numOfTasks);
+
+  taosThreadMutexLock(&execInfo.lock);
+
+  // extract stream task list
+  int32_t numOfExisted = taosHashGetSize(execInfo.pTaskMap);
+  if (numOfExisted == 0) {
+    doExtractTasksFromStream(pMnode);
+  }
+
+  initStreamNodeList(pMnode);
+
+  int32_t numOfUpdated = taosArrayGetSize(req.pUpdateNodes);
+  if (numOfUpdated > 0) {
+    mDebug("%d stream node(s) need updated from report of hbMsg(vgId:%d)", numOfUpdated, req.vgId);
+    setNodeEpsetExpiredFlag(req.pUpdateNodes);
+  }
+
+  bool snodeChanged = false;
+  for (int32_t i = 0; i < req.numOfTasks; ++i) {
+    STaskStatusEntry *p = taosArrayGet(req.pTaskStatus, i);
+
+    STaskStatusEntry *pTaskEntry = taosHashGet(execInfo.pTaskMap, &p->id, sizeof(p->id));
+    if (pTaskEntry == NULL) {
+      mError("s-task:0x%" PRIx64 " not found in mnode task list", p->id.taskId);
+      continue;
+    }
+
+    if (pTaskEntry->stage != p->stage && pTaskEntry->stage != -1) {
+      updateStageInfo(pTaskEntry, p->stage);
+      if (pTaskEntry->nodeId == SNODE_HANDLE) {
+        snodeChanged = true;
+      }
+    } else {
+      // task is idle for more than 50 sec.
+      if (fabs(pTaskEntry->inputQUsed - p->inputQUsed) <= DBL_EPSILON) {
+        if (!pTaskEntry->inputQChanging) {
+          pTaskEntry->inputQUnchangeCounter++;
+        } else {
+          pTaskEntry->inputQChanging = false;
+        }
+      } else {
+        pTaskEntry->inputQChanging = true;
+        pTaskEntry->inputQUnchangeCounter = 0;
+      }
+
+      streamTaskStatusCopy(pTaskEntry, p);
+      if (p->checkpointId != 0) {
+        if (p->checkpointFailed) {
+          mError("stream task:0x%" PRIx64 " checkpointId:%" PRIx64 " transId:%d failed, kill it", p->id.taskId,
+                 p->checkpointId, p->chkpointTransId);
+
+          SFailedCheckpointInfo info = {
+              .transId = p->chkpointTransId, .checkpointId = p->checkpointId, .streamUid = p->id.streamId};
+          addIntoCheckpointList(pList, &info);
+        }
+      }
+    }
+
+    if (p->status == pTaskEntry->status) {
+      pTaskEntry->statusLastDuration++;
+    } else {
+      pTaskEntry->status = p->status;
+      pTaskEntry->statusLastDuration = 0;
+    }
+
+    if (p->status != TASK_STATUS__READY) {
+      mDebug("received s-task:0x%" PRIx64 " not in ready status:%s", p->id.taskId, streamTaskGetStatusStr(p->status));
+    }
+  }
+
+  // current checkpoint is failed, rollback from the checkpoint trans
+  // kill the checkpoint trans and then set all tasks status to be normal
+  if (taosArrayGetSize(pList) > 0) {
+    bool    allReady = true;
+    SArray *p = mndTakeVgroupSnapshot(pMnode, &allReady);
+    taosArrayDestroy(p);
+
+    if (allReady || snodeChanged) {
+      // if the execInfo.activeCheckpoint == 0, the checkpoint is restoring from wal
+      for (int32_t i = 0; i < taosArrayGetSize(pList); ++i) {
+        SFailedCheckpointInfo *pInfo = taosArrayGet(pList, i);
+        mInfo("checkpointId:%" PRId64 " transId:%d failed, issue task-reset trans to reset all tasks status",
+              pInfo->checkpointId, pInfo->transId);
+
+        mndResetStatusFromCheckpoint(pMnode, pInfo->streamUid, pInfo->transId);
+      }
+    } else {
+      mInfo("not all vgroups are ready, wait for next HB from stream tasks to reset the task status");
+    }
+  }
+
+  taosThreadMutexUnlock(&execInfo.lock);
+  streamMetaClearHbMsg(&req);
+
+  taosArrayDestroy(pList);
+  return TSDB_CODE_SUCCESS;
+}
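mndProcessStreamHb above is the mnode entry point for TDMT_MND_STREAM_HEARTBEAT, which the earlier mmGetMsgHandles change routes to the read queue. The handler registration itself is presumably done in the stream module's init path, which sits in the suppressed mndStream.c diff, so the wiring below is an assumption and the second handler name is invented for illustration:

// Sketch: assumed registration in mndInitStream (not shown in this excerpt).
mndSetMsgHandle(pMnode, TDMT_MND_STREAM_HEARTBEAT, mndProcessStreamHb);
mndSetMsgHandle(pMnode, TDMT_MND_STREAM_REQ_CHKPT, mndProcessStreamReqCheckpoint);  // handler name assumed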
@@ -160,3 +160,106 @@ int32_t mndAddtoCheckpointWaitingList(SStreamObj* pStream, int64_t checkpointId)
 
   return TSDB_CODE_SUCCESS;
 }
+
+STrans *doCreateTrans(SMnode *pMnode, SStreamObj *pStream, SRpcMsg *pReq, const char *name, const char *pMsg) {
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, name);
+  if (pTrans == NULL) {
+    mError("failed to build trans:%s, reason: %s", name, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    return NULL;
+  }
+
+  mDebug("s-task:0x%" PRIx64 " start to build trans %s", pStream->uid, pMsg);
+
+  mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetSTbName);
+  if (mndTransCheckConflict(pMnode, pTrans) != 0) {
+    terrno = TSDB_CODE_MND_TRANS_CONFLICT;
+    mError("failed to build trans:%s for stream:0x%" PRIx64 " code:%s", name, pStream->uid, tstrerror(terrno));
+    mndTransDrop(pTrans);
+    return NULL;
+  }
+
+  terrno = 0;
+  return pTrans;
+}
+
+SSdbRaw *mndStreamActionEncode(SStreamObj *pStream) {
+  terrno = TSDB_CODE_OUT_OF_MEMORY;
+  void *buf = NULL;
+
+  SEncoder encoder;
+  tEncoderInit(&encoder, NULL, 0);
+  if (tEncodeSStreamObj(&encoder, pStream) < 0) {
+    tEncoderClear(&encoder);
+    goto STREAM_ENCODE_OVER;
+  }
+  int32_t tlen = encoder.pos;
+  tEncoderClear(&encoder);
+
+  int32_t  size = sizeof(int32_t) + tlen + MND_STREAM_RESERVE_SIZE;
+  SSdbRaw *pRaw = sdbAllocRaw(SDB_STREAM, MND_STREAM_VER_NUMBER, size);
+  if (pRaw == NULL) goto STREAM_ENCODE_OVER;
+
+  buf = taosMemoryMalloc(tlen);
+  if (buf == NULL) goto STREAM_ENCODE_OVER;
+
+  tEncoderInit(&encoder, buf, tlen);
+  if (tEncodeSStreamObj(&encoder, pStream) < 0) {
+    tEncoderClear(&encoder);
+    goto STREAM_ENCODE_OVER;
+  }
+  tEncoderClear(&encoder);
+
+  int32_t dataPos = 0;
+  SDB_SET_INT32(pRaw, dataPos, tlen, STREAM_ENCODE_OVER);
+  SDB_SET_BINARY(pRaw, dataPos, buf, tlen, STREAM_ENCODE_OVER);
+  SDB_SET_DATALEN(pRaw, dataPos, STREAM_ENCODE_OVER);
+
+  terrno = TSDB_CODE_SUCCESS;
+
+STREAM_ENCODE_OVER:
+  taosMemoryFreeClear(buf);
+  if (terrno != TSDB_CODE_SUCCESS) {
+    mError("stream:%s, failed to encode to raw:%p since %s", pStream->name, pRaw, terrstr());
+    sdbFreeRaw(pRaw);
+    return NULL;
+  }
+
+  mTrace("stream:%s, encode to raw:%p, row:%p, checkpoint:%" PRId64 "", pStream->name, pRaw, pStream,
+         pStream->checkpointId);
+  return pRaw;
+}
+
+int32_t mndPersistTransLog(SStreamObj *pStream, STrans *pTrans, int32_t status) {
+  SSdbRaw *pCommitRaw = mndStreamActionEncode(pStream);
+  if (pCommitRaw == NULL) {
+    mError("failed to encode stream since %s", terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
+    mError("stream trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
+    sdbFreeRaw(pCommitRaw);
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  if (sdbSetRawStatus(pCommitRaw, status) != 0) {
+    mError("stream trans:%d failed to set raw status:%d since %s", pTrans->id, status, terrstr());
+    sdbFreeRaw(pCommitRaw);
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  return 0;
+}
+
+void initTransAction(STransAction *pAction, void *pCont, int32_t contLen, int32_t msgType, const SEpSet *pEpset,
+                     int32_t retryCode) {
+  pAction->epSet = *pEpset;
+  pAction->contLen = contLen;
+  pAction->pCont = pCont;
+  pAction->msgType = msgType;
+  pAction->retryCode = retryCode;
+}
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mndStream.h"
+#include "mndTrans.h"
+#include "tmisce.h"
+#include "mndVgroup.h"
+
+SArray *mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady) {
+  SSdb   *pSdb = pMnode->pSdb;
+  void   *pIter = NULL;
+  SVgObj *pVgroup = NULL;
+
+  *allReady = true;
+  SArray *pVgroupListSnapshot = taosArrayInit(4, sizeof(SNodeEntry));
+
+  while (1) {
+    pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
+    if (pIter == NULL) {
+      break;
+    }
+
+    SNodeEntry entry = {.nodeId = pVgroup->vgId, .hbTimestamp = pVgroup->updateTime};
+    entry.epset = mndGetVgroupEpset(pMnode, pVgroup);
+
+    // if not all ready till now, no need to check the remaining vgroups.
+    if (*allReady) {
+      for (int32_t i = 0; i < pVgroup->replica; ++i) {
+        if (!pVgroup->vnodeGid[i].syncRestore) {
+          mInfo("vgId:%d not restored, not ready for checkpoint or other operations", pVgroup->vgId);
+          *allReady = false;
+          break;
+        }
+
+        ESyncState state = pVgroup->vnodeGid[i].syncState;
+        if (state == TAOS_SYNC_STATE_OFFLINE || state == TAOS_SYNC_STATE_ERROR) {
+          mInfo("vgId:%d offline/err, not ready for checkpoint or other operations", pVgroup->vgId);
+          *allReady = false;
+          break;
+        }
+      }
+    }
+
+    char buf[256] = {0};
+    EPSET_TO_STR(&entry.epset, buf);
+    mDebug("take node snapshot, nodeId:%d %s", entry.nodeId, buf);
+    taosArrayPush(pVgroupListSnapshot, &entry);
+    sdbRelease(pSdb, pVgroup);
+  }
+
+  SSnodeObj *pObj = NULL;
+  while (1) {
+    pIter = sdbFetch(pSdb, SDB_SNODE, pIter, (void **)&pObj);
+    if (pIter == NULL) {
+      break;
+    }
+
+    SNodeEntry entry = {0};
+    addEpIntoEpSet(&entry.epset, pObj->pDnode->fqdn, pObj->pDnode->port);
+    entry.nodeId = SNODE_HANDLE;
+
+    char buf[256] = {0};
+    EPSET_TO_STR(&entry.epset, buf);
+    mDebug("take snode snapshot, nodeId:%d %s", entry.nodeId, buf);
+    taosArrayPush(pVgroupListSnapshot, &entry);
+    sdbRelease(pSdb, pObj);
+  }
+
+  return pVgroupListSnapshot;
+}
+
+SStreamObj *mndGetStreamObj(SMnode *pMnode, int64_t streamId) {
+  void       *pIter = NULL;
+  SSdb       *pSdb = pMnode->pSdb;
+  SStreamObj *pStream = NULL;
+
+  while ((pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream)) != NULL) {
+    if (pStream->uid == streamId) {
+      sdbCancelFetch(pSdb, pIter);
+      return pStream;
+    }
+    sdbRelease(pSdb, pStream);
+  }
+
+  return NULL;
+}
+
+void mndKillTransImpl(SMnode *pMnode, int32_t transId, const char *pDbName) {
+  STrans *pTrans = mndAcquireTrans(pMnode, transId);
+  if (pTrans != NULL) {
+    mInfo("kill active transId:%d in Db:%s", transId, pDbName);
+    mndKillTrans(pMnode, pTrans);
+    mndReleaseTrans(pMnode, pTrans);
+  } else {
+    mError("failed to acquire trans in Db:%s, transId:%d", pDbName, transId);
+  }
+}
+
+int32_t extractNodeEpset(SMnode *pMnode, SEpSet *pEpSet, bool *hasEpset, int32_t taskId, int32_t nodeId) {
+  *hasEpset = false;
+
+  pEpSet->numOfEps = 0;
+  if (nodeId == SNODE_HANDLE) {
+    SSnodeObj *pObj = NULL;
+    void      *pIter = NULL;
+
+    pIter = sdbFetch(pMnode->pSdb, SDB_SNODE, pIter, (void **)&pObj);
+    if (pIter != NULL) {
+      addEpIntoEpSet(pEpSet, pObj->pDnode->fqdn, pObj->pDnode->port);
+      sdbRelease(pMnode->pSdb, pObj);
+      sdbCancelFetch(pMnode->pSdb, pIter);
+      *hasEpset = true;
+      return TSDB_CODE_SUCCESS;
+    } else {
+      mError("failed to acquire snode epset");
+      return TSDB_CODE_INVALID_PARA;
+    }
+  } else {
+    SVgObj *pVgObj = mndAcquireVgroup(pMnode, nodeId);
+    if (pVgObj != NULL) {
+      SEpSet epset = mndGetVgroupEpset(pMnode, pVgObj);
+      mndReleaseVgroup(pMnode, pVgObj);
+
+      epsetAssign(pEpSet, &epset);
+      *hasEpset = true;
+      return TSDB_CODE_SUCCESS;
+    } else {
+      mDebug("orphaned task:0x%x need to be dropped, nodeId:%d, no redo action", taskId, nodeId);
+      return TSDB_CODE_SUCCESS;
+    }
+  }
+}
+
+static int32_t doResumeStreamTask(STrans *pTrans, SMnode *pMnode, SStreamTask *pTask, int8_t igUntreated) {
+  SVResumeStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVResumeStreamTaskReq));
+  if (pReq == NULL) {
+    mError("failed to malloc in resume stream, size:%" PRIzu ", code:%s", sizeof(SVResumeStreamTaskReq),
+           tstrerror(TSDB_CODE_OUT_OF_MEMORY));
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    return -1;
+  }
+
+  pReq->head.vgId = htonl(pTask->info.nodeId);
+  pReq->taskId = pTask->id.taskId;
+  pReq->streamId = pTask->id.streamId;
+  pReq->igUntreated = igUntreated;
+
+  SEpSet  epset = {0};
+  bool    hasEpset = false;
+  int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId);
+  if (code != TSDB_CODE_SUCCESS) {
+    terrno = code;
+    taosMemoryFree(pReq);
+    return -1;
+  }
+
+  STransAction action = {0};
+  initTransAction(&action, pReq, sizeof(SVResumeStreamTaskReq), TDMT_STREAM_TASK_RESUME, &epset, 0);
+  if (mndTransAppendRedoAction(pTrans, &action) != 0) {
+    taosMemoryFree(pReq);
+    return -1;
+  }
+  return 0;
+}
+
+SStreamTask *mndGetStreamTask(STaskId *pId, SStreamObj *pStream) {
+  for (int32_t i = 0; i < taosArrayGetSize(pStream->tasks); i++) {
+    SArray *pLevel = taosArrayGetP(pStream->tasks, i);
+
+    int32_t numOfLevels = taosArrayGetSize(pLevel);
+    for (int32_t j = 0; j < numOfLevels; j++) {
+      SStreamTask *pTask = taosArrayGetP(pLevel, j);
+      if (pTask->id.taskId == pId->taskId) {
+        return pTask;
+      }
+    }
+  }
+
+  return NULL;
+}
+
+int32_t mndGetNumOfStreamTasks(const SStreamObj *pStream) {
+  int32_t num = 0;
+  for (int32_t i = 0; i < taosArrayGetSize(pStream->tasks); ++i) {
+    SArray *pLevel = taosArrayGetP(pStream->tasks, i);
+    num += taosArrayGetSize(pLevel);
+  }
+
+  return num;
+}
+
+int32_t mndResumeStreamTasks(STrans *pTrans, SMnode *pMnode, SStreamObj *pStream, int8_t igUntreated) {
+  int32_t size = taosArrayGetSize(pStream->tasks);
+  for (int32_t i = 0; i < size; i++) {
+    SArray *pTasks = taosArrayGetP(pStream->tasks, i);
+    int32_t sz = taosArrayGetSize(pTasks);
+    for (int32_t j = 0; j < sz; j++) {
+      SStreamTask *pTask = taosArrayGetP(pTasks, j);
+      if (doResumeStreamTask(pTrans, pMnode, pTask, igUntreated) < 0) {
+        return -1;
+      }
+
+      if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__PAUSE) {
+        atomic_store_8(&pTask->status.taskStatus, pTask->status.statusBackup);
+      }
+    }
+  }
+  return 0;
+}
+
+static int32_t doPauseStreamTask(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) {
+  SVPauseStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVPauseStreamTaskReq));
+  if (pReq == NULL) {
+    mError("failed to malloc in pause stream, size:%" PRIzu ", code:%s", sizeof(SVPauseStreamTaskReq),
+           tstrerror(TSDB_CODE_OUT_OF_MEMORY));
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    return -1;
+  }
+
+  pReq->head.vgId = htonl(pTask->info.nodeId);
+  pReq->taskId = pTask->id.taskId;
+  pReq->streamId = pTask->id.streamId;
+
+  SEpSet epset = {0};
+  mDebug("pause node:%d, epset:%d", pTask->info.nodeId, epset.numOfEps);
+  bool    hasEpset = false;
+  int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId);
+  if (code != TSDB_CODE_SUCCESS) {
+    terrno = code;
+    taosMemoryFree(pReq);
+    return -1;
+  }
+
+  // no valid epset, return directly without redoAction
+  if (!hasEpset) {
+    taosMemoryFree(pReq);
+    return TSDB_CODE_SUCCESS;
+  }
+
+  STransAction action = {0};
+  initTransAction(&action, pReq, sizeof(SVPauseStreamTaskReq), TDMT_STREAM_TASK_PAUSE, &epset, 0);
+  if (mndTransAppendRedoAction(pTrans, &action) != 0) {
+    taosMemoryFree(pReq);
+    return -1;
+  }
+  return 0;
+}
+
+int32_t mndPauseStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream) {
+  SArray *tasks = pStream->tasks;
+
+  int32_t size = taosArrayGetSize(tasks);
+  for (int32_t i = 0; i < size; i++) {
+    SArray *pTasks = taosArrayGetP(tasks, i);
+    int32_t sz = taosArrayGetSize(pTasks);
+    for (int32_t j = 0; j < sz; j++) {
+      SStreamTask *pTask = taosArrayGetP(pTasks, j);
+      if (doPauseStreamTask(pMnode, pTrans, pTask) < 0) {
+        return -1;
+      }
+
+      if (atomic_load_8(&pTask->status.taskStatus) != TASK_STATUS__PAUSE) {
+        atomic_store_8(&pTask->status.statusBackup, pTask->status.taskStatus);
+        atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__PAUSE);
+      }
+    }
+  }
+  return 0;
+}
@@ -235,7 +235,6 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
 int32_t tqProcessTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg);
-int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg);
 
 int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver);
 int32_t tqScanWal(STQ* pTq);
@ -886,7 +886,8 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask
|
||||||
pTask->execInfo.step2Start = taosGetTimestampMs();
|
pTask->execInfo.step2Start = taosGetTimestampMs();
|
||||||
|
|
||||||
if (done) {
|
if (done) {
|
||||||
qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, 0.0);
|
qDebug("s-task:%s scan wal(step 2) verRange:%" PRId64 "-%" PRId64 " ended, elapsed time:%.2fs", id, pRange->minVer,
|
||||||
|
pRange->maxVer, 0.0);
|
||||||
streamTaskPutTranstateIntoInputQ(pTask);
|
streamTaskPutTranstateIntoInputQ(pTask);
|
||||||
streamExecTask(pTask); // exec directly
|
streamExecTask(pTask); // exec directly
|
||||||
} else {
|
} else {
|
||||||
|
@ -1141,8 +1142,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.taskId);
|
SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.taskId);
|
||||||
if (pTask == NULL) {
|
if (pTask == NULL) {
|
||||||
tqError("vgId:%d failed to find s-task:0x%x, ignore checkpoint msg. it may have been destroyed already", vgId,
|
tqError("vgId:%d failed to find s-task:0x%x, ignore checkpoint msg. it may have been destroyed", vgId, req.taskId);
|
||||||
req.taskId);
|
|
||||||
SRpcMsg rsp = {0};
|
SRpcMsg rsp = {0};
|
||||||
buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
|
buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
|
||||||
tmsgSendRsp(&rsp); // error occurs
|
tmsgSendRsp(&rsp); // error occurs
|
||||||
|
@ -1169,8 +1169,9 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
|
||||||
taosThreadMutexLock(&pTask->lock);
|
taosThreadMutexLock(&pTask->lock);
|
||||||
ETaskStatus status = streamTaskGetStatus(pTask)->state;
|
ETaskStatus status = streamTaskGetStatus(pTask)->state;
|
||||||
|
|
||||||
|
if (req.mndTrigger == 1) {
|
||||||
if (status == TASK_STATUS__HALT || status == TASK_STATUS__PAUSE) {
|
if (status == TASK_STATUS__HALT || status == TASK_STATUS__PAUSE) {
|
||||||
tqError("s-task:%s not ready for checkpoint, since it is halt, ignore this checkpoint:%" PRId64 ", set it failure",
|
tqError("s-task:%s not ready for checkpoint, since it is halt, ignore checkpoint:%" PRId64 ", set it failure",
|
||||||
pTask->id.idStr, req.checkpointId);
|
           pTask->id.idStr, req.checkpointId);

   taosThreadMutexUnlock(&pTask->lock);

@@ -1182,6 +1183,9 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
 
       return TSDB_CODE_SUCCESS;
     }
+  } else {
+    ASSERT(status == TASK_STATUS__HALT);
+  }
 
   // check if the checkpoint msg already sent or not.
   if (status == TASK_STATUS__CK) {
@@ -1198,16 +1202,8 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
   streamProcessCheckpointSourceReq(pTask, &req);
   taosThreadMutexUnlock(&pTask->lock);
 
-  int32_t total = 0;
-  streamMetaWLock(pMeta);
-
-  // set the initial value for generating check point
-  // set the mgmt epset info according to the checkout source msg from mnode, todo update mgmt epset if needed
-  total = pMeta->numOfStreamTasks;
-  streamMetaWUnLock(pMeta);
-
-  qInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", total checkpoint reqs:%d",
-        pTask->id.idStr, vgId, pTask->info.taskLevel, req.checkpointId, total);
+  qInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", transId:%d",
+        pTask->id.idStr, vgId, pTask->info.taskLevel, req.checkpointId, req.transId);
 
   code = streamAddCheckpointSourceRspMsg(&req, &pMsg->info, pTask, 1);
   if (code != TSDB_CODE_SUCCESS) {
@@ -1233,35 +1229,3 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) {
 int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) {
   return tqStreamTaskProcessTaskResetReq(pTq->pStreamMeta, pMsg);
 }
-
-// NOTE: here we may receive this message more than once, so need to handle this case
-int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg) {
-  SVDropHTaskReq* pReq = (SVDropHTaskReq*)pMsg->pCont;
-
-  SStreamMeta* pMeta = pTq->pStreamMeta;
-  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
-  if (pTask == NULL) {
-    tqError("vgId:%d process drop fill-history task req, failed to acquire task:0x%x, it may have been dropped already",
-            pMeta->vgId, pReq->taskId);
-    return TSDB_CODE_SUCCESS;
-  }
-
-  tqDebug("s-task:%s receive drop fill-history msg from mnode", pTask->id.idStr);
-  if (pTask->hTaskInfo.id.taskId == 0) {
-    tqError("vgId:%d s-task:%s not have related fill-history task", pMeta->vgId, pTask->id.idStr);
-    streamMetaReleaseTask(pMeta, pTask);
-    return TSDB_CODE_SUCCESS;
-  }
-
-  taosThreadMutexLock(&pTask->lock);
-  SStreamTaskId id = {.streamId = pTask->hTaskInfo.id.streamId, .taskId = pTask->hTaskInfo.id.taskId};
-  streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &id);
-  taosThreadMutexUnlock(&pTask->lock);
-
-  // clear the scheduler status
-  streamTaskSetSchedStatusInactive(pTask);
-  tqDebug("s-task:%s set scheduler status:%d after drop fill-history task", pTask->id.idStr, pTask->status.schedStatus);
-  streamMetaReleaseTask(pMeta, pTask);
-  return TSDB_CODE_SUCCESS;
-}
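Taken together, the hunks above change how the vnode handles a checkpoint-source request: a task sitting in halt status (one that has just finished the fill-history transfer) is now an accepted recipient, and the dedicated drop-fill-history message handler is removed because dropping the helper task now rides on checkpoint completion (see the streamTaskBuildCheckpoint hunk later in this diff). A minimal standalone sketch of the status gate follows; TaskStatus and handleCheckpointSource are illustrative stand-ins, not the TDengine API.

    #include <stdio.h>

    typedef enum { TASK_READY, TASK_HALT, TASK_CK } TaskStatus;

    /* Sketch: accept a checkpoint-source request in READY or HALT; if the task
     * is already in CK, the request is a resend and only the rsp is repeated. */
    static int handleCheckpointSource(TaskStatus status) {
      if (status == TASK_CK) {
        printf("checkpoint msg already handled, resend rsp only\n");
        return 0;
      }
      if (status != TASK_READY && status != TASK_HALT) {
        printf("invalid status, discard\n");
        return -1;
      }
      printf("begin checkpoint from %s\n", status == TASK_HALT ? "halt" : "ready");
      return 0;
    }

    int main(void) {
      handleCheckpointSource(TASK_HALT);  /* now accepted: transfer-state just finished */
      handleCheckpointSource(TASK_CK);    /* duplicated trigger stays idempotent */
      return 0;
    }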
@@ -465,7 +465,7 @@ bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {
   int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData);
   while (pReader->nextBlk < numOfBlocks) {
     tqDebug("try next data block, len:%d ver:%" PRId64 " index:%d/%d, %s", pReader->msg.msgLen, pReader->msg.ver,
-            pReader->nextBlk, numOfBlocks, idstr);
+            (pReader->nextBlk + 1), numOfBlocks, idstr);
 
     SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
     if (pReader->tbIdHash == NULL) {
@@ -663,7 +663,8 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen
   if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
     STaskId* pHTaskId = &pTask->hTaskInfo.id;
     streamMetaUnregisterTask(pMeta, pHTaskId->streamId, pHTaskId->taskId);
-    tqDebug("vgId:%d drop fill-history task:0x%x dropped firstly", vgId, (int32_t)pHTaskId->taskId);
+    tqDebug("s-task:0x%x vgId:%d drop fill-history task:0x%x firstly", pReq->taskId, vgId,
+            (int32_t)pHTaskId->taskId);
   }
   streamMetaReleaseTask(pMeta, pTask);
 }
@@ -600,11 +600,6 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
         tqProcessTaskResetReq(pVnode->pTq, pMsg);
       }
     } break;
-    case TDMT_STREAM_HTASK_DROP: {
-      if (pVnode->restored && vnodeIsLeader(pVnode)) {
-        tqProcessTaskDropHTask(pVnode->pTq, pMsg);
-      }
-    } break;
     case TDMT_VND_ALTER_CONFIRM:
       needCommit = pVnode->config.hashChange;
      if (vnodeProcessAlterConfirmReq(pVnode, ver, pReq, len, pRsp) < 0) {
@@ -2197,7 +2197,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
       pTSInfo->base.cond.startVersion = pStreamInfo->fillHistoryVer.minVer;
       pTSInfo->base.cond.endVersion = pStreamInfo->fillHistoryVer.maxVer;
       pTSInfo->base.cond.twindows = pStreamInfo->fillHistoryWindow;
-      qDebug("stream recover step2, verRange:%" PRId64 " - %" PRId64 ", window:%" PRId64 "-%" PRId64 ", %s",
+      qDebug("stream scan step2 (scan wal), verRange:%" PRId64 " - %" PRId64 ", window:%" PRId64 "-%" PRId64 ", %s",
             pTSInfo->base.cond.startVersion, pTSInfo->base.cond.endVersion, pTSInfo->base.cond.twindows.skey,
             pTSInfo->base.cond.twindows.ekey, id);
       pStreamInfo->recoverStep = STREAM_RECOVER_STEP__NONE;
@@ -1305,10 +1305,13 @@ static bool validateStateOper(const SValueNode* pVal) {
   if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) {
     return false;
   }
+  if (strlen(varDataVal(pVal->datum.p)) == 2) {
     return (
         0 == strncasecmp(varDataVal(pVal->datum.p), "GT", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "GE", 2) ||
         0 == strncasecmp(varDataVal(pVal->datum.p), "LT", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "LE", 2) ||
         0 == strncasecmp(varDataVal(pVal->datum.p), "EQ", 2) || 0 == strncasecmp(varDataVal(pVal->datum.p), "NE", 2));
+  }
+  return false;
 }
 
 static int32_t translateStateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
@@ -3737,7 +3740,11 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .translateFunc = translateTbUidColumn,
     .getEnvFunc = NULL,
     .initFunc = NULL,
+#ifdef BUILD_NO_CALL
     .sprocessFunc = qTbUidFunction,
+#else
+    .sprocessFunc = NULL,
+#endif
     .finalizeFunc = NULL
   },
   {
@@ -3747,7 +3754,11 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
     .translateFunc = translateVgIdColumn,
     .getEnvFunc = NULL,
     .initFunc = NULL,
+#ifdef BUILD_NO_CALL
     .sprocessFunc = qVgIdFunction,
+#else
+    .sprocessFunc = NULL,
+#endif
     .finalizeFunc = NULL
   },
   {
@@ -1788,6 +1788,7 @@ bool getTimePseudoFuncEnv(SFunctionNode *UNUSED_PARAM(pFunc), SFuncExecEnv *pEnv
   return true;
 }
 
+#ifdef BUILD_NO_CALL
 int32_t qStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
   colDataSetInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 0));
   return TSDB_CODE_SUCCESS;
@@ -1797,6 +1798,7 @@ int32_t qEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
   colDataSetInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 1));
   return TSDB_CODE_SUCCESS;
 }
+#endif
 
 int32_t winDurFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
   colDataSetInt64(pOutput->columnData, pOutput->numOfRows, (int64_t *)colDataGetData(pInput->columnData, 2));
@@ -1824,7 +1826,7 @@ int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pO
   pOutput->numOfRows += pInput->numOfRows;
   return TSDB_CODE_SUCCESS;
 }
+#ifdef BUILD_NO_CALL
 int32_t qTbUidFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
   char* p = colDataGetNumData(pInput->columnData, 0);
 
@@ -1848,7 +1850,7 @@ int32_t qVgIdFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOut
   pOutput->numOfRows += pInput->numOfRows;
   return TSDB_CODE_SUCCESS;
 }
+#endif
 
 /** Aggregation functions **/
 int32_t countScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
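The hunks above wrap the unused pseudo-column scalar implementations (qStartTsFunction, qEndTsFunction, qTbUidFunction, qVgIdFunction) in #ifdef BUILD_NO_CALL, and let the matching funcMgtBuiltins slots fall back to NULL in ordinary builds. A self-contained sketch of the same conditional-registration idiom is below; the names in it are illustrative, not TDengine's.

    #include <stddef.h>
    #include <stdio.h>

    typedef int (*ScalarFn)(int);

    #ifdef BUILD_NO_CALL
    static int tbUidFn(int in) { return in; }  /* kept only for no-call builds */
    #endif

    /* Function-table entry: the slot is NULL when the impl is compiled out,
     * so callers must treat a NULL slot as "not invocable". */
    static const ScalarFn kTable[] = {
    #ifdef BUILD_NO_CALL
        tbUidFn,
    #else
        NULL,
    #endif
    };

    int main(void) {
      printf("slot 0 %s\n", kTable[0] ? "present" : "compiled out");
      return 0;
    }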
@@ -56,13 +56,6 @@ struct SStreamTaskSM {
   SArray* pWaitingEventList;
 };
 
-typedef struct SStreamEventInfo {
-  EStreamTaskEvent event;
-  const char*      name;
-} SStreamEventInfo;
-
-// SStreamTaskSM* streamCreateStateMachine(SStreamTask* pTask);
-// void* streamDestroyStateMachine(SStreamTaskSM* pSM);
 #ifdef __cplusplus
 }
 #endif
@@ -1768,8 +1768,8 @@ void taskDbInitOpt(STaskDbWrapper* pTaskDb) {
   rocksdb_options_set_recycle_log_file_num(opts, 6);
   rocksdb_options_set_max_write_buffer_number(opts, 3);
   rocksdb_options_set_info_log_level(opts, 1);
-  rocksdb_options_set_db_write_buffer_size(opts, 64 << 20);
-  rocksdb_options_set_write_buffer_size(opts, 32 << 20);
+  rocksdb_options_set_db_write_buffer_size(opts, 256 << 20);
+  rocksdb_options_set_write_buffer_size(opts, 128 << 20);
   rocksdb_options_set_atomic_flush(opts, 1);
 
   pTaskDb->dbOpt = opts;
@@ -1780,6 +1780,7 @@ void taskDbInitOpt(STaskDbWrapper* pTaskDb) {
   rocksdb_options_set_compaction_filter_factory(pTaskDb->dbOpt, pTaskDb->filterFactory);
   pTaskDb->readOpt = rocksdb_readoptions_create();
   pTaskDb->writeOpt = rocksdb_writeoptions_create();
+  rocksdb_writeoptions_disable_WAL(pTaskDb->writeOpt, 1);
 
   size_t nCf = sizeof(ginitDict) / sizeof(ginitDict[0]);
   pTaskDb->pCf = taosMemoryCalloc(nCf, sizeof(rocksdb_column_family_handle_t*));
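The backend hunks above quadruple the RocksDB write-buffer budgets (db-wide 64 MB to 256 MB, per-memtable 32 MB to 128 MB) and disable the RocksDB WAL on the task write options, presumably because durability comes from the stream checkpoint mechanism rather than the backend WAL. The sketch below exercises the same RocksDB C API calls in isolation; the path and sizes are illustrative.

    #include <rocksdb/c.h>
    #include <stdio.h>

    int main(void) {
      rocksdb_options_t *opts = rocksdb_options_create();
      rocksdb_options_set_create_if_missing(opts, 1);
      rocksdb_options_set_db_write_buffer_size(opts, 256 << 20); /* shared cap, 256MB */
      rocksdb_options_set_write_buffer_size(opts, 128 << 20);    /* per-memtable, 128MB */

      rocksdb_writeoptions_t *wopt = rocksdb_writeoptions_create();
      rocksdb_writeoptions_disable_WAL(wopt, 1); /* durability provided elsewhere */

      char *err = NULL;
      rocksdb_t *db = rocksdb_open(opts, "/tmp/taskdb-sketch", &err);
      if (err != NULL) { fprintf(stderr, "open failed: %s\n", err); return 1; }

      rocksdb_put(db, wopt, "k", 1, "v", 1, &err);
      rocksdb_close(db);
      rocksdb_writeoptions_destroy(wopt);
      rocksdb_options_destroy(opts);
      return 0;
    }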
@@ -36,6 +36,7 @@ int32_t tEncodeStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckp
   if (tEncodeI32(pEncoder, pReq->mnodeId) < 0) return -1;
   if (tEncodeI64(pEncoder, pReq->expireTime) < 0) return -1;
   if (tEncodeI32(pEncoder, pReq->transId) < 0) return -1;
+  if (tEncodeI8(pEncoder, pReq->mndTrigger) < 0) return -1;
   tEndEncode(pEncoder);
   return pEncoder->pos;
 }
@@ -50,6 +51,7 @@ int32_t tDecodeStreamCheckpointSourceReq(SDecoder* pDecoder, SStreamCheckpointSo
   if (tDecodeI32(pDecoder, &pReq->mnodeId) < 0) return -1;
   if (tDecodeI64(pDecoder, &pReq->expireTime) < 0) return -1;
   if (tDecodeI32(pDecoder, &pReq->transId) < 0) return -1;
+  if (tDecodeI8(pDecoder, &pReq->mndTrigger) < 0) return -1;
   tEndDecode(pDecoder);
   return 0;
 }
@@ -151,7 +153,8 @@ int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSo
 
   // todo this status may not be set here.
   // 1. set task status to be prepared for check point, no data are allowed to put into inputQ.
-  streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT);
+  int32_t code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT);
+  ASSERT(code == TSDB_CODE_SUCCESS);
 
   pTask->chkInfo.transId = pReq->transId;
   pTask->chkInfo.checkpointingId = pReq->checkpointId;
@@ -160,8 +163,7 @@ int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSo
   pTask->execInfo.checkpoint += 1;
 
   // 2. Put the checkpoint block into inputQ, to make sure all blocks with less version have been handled by this task
-  int32_t code = appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT_TRIGGER);
-  return code;
+  return appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT_TRIGGER);
 }
 
 static int32_t continueDispatchCheckpointBlock(SStreamDataBlock* pBlock, SStreamTask* pTask) {
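The encode/decode hunks above append a one-byte mndTrigger flag after transId in SStreamCheckpointSourceReq, so the receiver can tell an mnode-timer checkpoint from one requested by a task. Appending at the tail keeps the field order that older payloads use. A tiny standalone sketch of tail-appended encoding; the Encoder type and helpers here are illustrative, not the tEncode API.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct { uint8_t buf[64]; int pos; } Encoder;

    static void putI32(Encoder *e, int32_t v) { memcpy(e->buf + e->pos, &v, 4); e->pos += 4; }
    static void putI8(Encoder *e, int8_t v)   { e->buf[e->pos++] = (uint8_t)v; }

    int main(void) {
      Encoder e = {{0}, 0};
      putI32(&e, 42); /* transId: last field of the old layout */
      putI8(&e, 1);   /* mndTrigger: new field, appended at the tail */
      printf("encoded %d bytes\n", e.pos);
      return 0;
    }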
@@ -315,8 +317,9 @@ int32_t streamSaveTaskCheckpointInfo(SStreamTask* p, int64_t checkpointId) {
     pCKInfo->checkpointVer = pCKInfo->processedVer;
 
     streamTaskClearCheckInfo(p, false);
-    code = streamTaskHandleEvent(p->status.pSM, TASK_EVENT_CHECKPOINT_DONE);
     taosThreadMutexUnlock(&p->lock);
+
+    code = streamTaskHandleEvent(p->status.pSM, TASK_EVENT_CHECKPOINT_DONE);
   } else {
     stDebug("s-task:%s vgId:%d status:%s not keep the checkpoint metaInfo, checkpoint:%" PRId64 " failed", id, vgId,
             pStatus->name, pCKInfo->checkpointingId);
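The hunk above moves the TASK_EVENT_CHECKPOINT_DONE dispatch out of the section guarded by p->lock: the state machine takes its own locks, so firing the event while still holding the task lock risks lock-order inversion against other event paths. A minimal sketch of the pattern, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t taskLock = PTHREAD_MUTEX_INITIALIZER;

    static void handleEvent(const char *ev) {
      /* the state machine takes its own locks; calling it while holding
       * taskLock could invert the lock order used by other event paths */
      printf("handle event: %s\n", ev);
    }

    int main(void) {
      pthread_mutex_lock(&taskLock);
      /* ...update checkpoint metadata under the task lock... */
      pthread_mutex_unlock(&taskLock);

      handleEvent("checkpoint-done"); /* moved outside the critical section */
      return 0;
    }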
@@ -459,6 +462,7 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
   int64_t     startTs = pTask->chkInfo.startTs;
   int64_t     ckId = pTask->chkInfo.checkpointingId;
   const char* id = pTask->id.idStr;
+  bool        dropRelHTask = (streamTaskGetPrevStatus(pTask) == TASK_STATUS__HALT);
 
   // sink task do not need to save the status, and generated the checkpoint
   if (pTask->info.taskLevel != TASK_LEVEL__SINK) {
@@ -497,6 +501,21 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
     }
   }
 
+  if ((code == TSDB_CODE_SUCCESS) && dropRelHTask) {
+    // transferred from the halt status, it is done the fill-history procedure and finish with the checkpoint
+    // free it and remove fill-history task from disk meta-store
+    taosThreadMutexLock(&pTask->lock);
+    if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
+      SStreamTaskId hTaskId = {.streamId = pTask->hTaskInfo.id.streamId, .taskId = pTask->hTaskInfo.id.taskId};
+
+      stDebug("s-task:%s fill-history finish checkpoint done, drop related fill-history task:0x%x", id, hTaskId.taskId);
+      streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pTask->pMeta->vgId, &hTaskId);
+    } else {
+      stWarn("s-task:%s related fill-history task:0x%x is erased", id, (int32_t)pTask->hTaskInfo.id.taskId);
+    }
+    taosThreadMutexUnlock(&pTask->lock);
+  }
+
   // clear the checkpoint info if failed
   if (code != TSDB_CODE_SUCCESS) {
     taosThreadMutexLock(&pTask->lock);
@@ -340,7 +340,7 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
   } else {
     double el = (taosGetTimestampMs() - pTask->execInfo.step2Start) / 1000.;
     stDebug(
-        "s-task:%s fill-history task end, scal wal elapsed time:%.2fSec,update related stream task:%s info, transfer "
+        "s-task:%s fill-history task end, scan wal elapsed time:%.2fSec,update related stream task:%s info, transfer "
         "exec state",
         id, el, pStreamTask->id.idStr);
   }
@@ -380,56 +380,34 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
     return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
   }
 
+  // 1. expand the query time window for stream task of WAL scanner
   if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
     // update the scan data range for source task.
     stDebug("s-task:%s level:%d stream task window %" PRId64 " - %" PRId64 " update to %" PRId64 " - %" PRId64
             ", status:%s, sched-status:%d",
             pStreamTask->id.idStr, TASK_LEVEL__SOURCE, pTimeWindow->skey, pTimeWindow->ekey, INT64_MIN,
             pTimeWindow->ekey, p, pStreamTask->status.schedStatus);
-  } else {
-    stDebug("s-task:%s no need to update time window for non-source task", pStreamTask->id.idStr);
-  }
-
-  // 1. expand the query time window for stream task of WAL scanner
-  if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
     pTimeWindow->skey = INT64_MIN;
     qStreamInfoResetTimewindowFilter(pStreamTask->exec.pExecutor);
   } else {
-    stDebug("s-task:%s non-source task no need to reset filter window", pStreamTask->id.idStr);
+    stDebug("s-task:%s no need to update/reset filter time window for non-source tasks", pStreamTask->id.idStr);
   }
 
   // 2. transfer the ownership of executor state
   streamTaskReleaseState(pTask);
   streamTaskReloadState(pStreamTask);
 
-  // 3. resume the state of stream task, after this function, the stream task will run immediately.
-  streamTaskResume(pStreamTask);
+  // 3. send msg to mnode to launch a checkpoint to keep the state for current stream
+  streamTaskSendCheckpointReq(pStreamTask);
+  // streamTaskResume(pStreamTask);
 
-  stDebug("s-task:%s fill-history task set status to be dropping, save the state into disk", id);
-
-  // 4. free it and remove fill-history task from disk meta-store
-  streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);
-
-  // 5. assign the status to the value that will be kept in disk
+  // 4. assign the status to the value that will be kept in disk
   pStreamTask->status.taskStatus = streamTaskGetStatus(pStreamTask)->state;
 
-  // 6. open the inputQ for all upstream tasks
+  // 5. open the inputQ for all upstream tasks
   streamTaskOpenAllUpstreamInput(pStreamTask);
 
-  // 7. add empty delete block
-  if ((pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) && taosQueueEmpty(pStreamTask->inputq.queue->pQueue)) {
-    SStreamRefDataBlock* pItem = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0);
-
-    SSDataBlock* pDelBlock = createSpecialDataBlock(STREAM_DELETE_DATA);
-    pDelBlock->info.rows = 0;
-    pDelBlock->info.version = 0;
-    pItem->type = STREAM_INPUT__REF_DATA_BLOCK;
-    pItem->pBlock = pDelBlock;
-    int32_t code = streamTaskPutDataIntoInputQ(pStreamTask, (SStreamQueueItem*)pItem);
-    stDebug("s-task:%s append dummy delete block,res:%d", pStreamTask->id.idStr, code);
-  }
-
-  streamSchedExec(pStreamTask);
   streamMetaReleaseTask(pMeta, pStreamTask);
   return TSDB_CODE_SUCCESS;
 }
@@ -447,14 +425,24 @@ int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
 
   if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) {  // do transfer task operator states.
     code = streamDoTransferStateToStreamTask(pTask);
-  } else {  // drop fill-history task and open inputQ of sink task
+  } else {  // no state transfer for sink tasks, and drop fill-history task, followed by opening inputQ of sink task.
     SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
     if (pStreamTask != NULL) {
-      streamTaskOpenAllUpstreamInput(pStreamTask);
-      streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);
+      // halt the related stream sink task
+      code = streamTaskHandleEvent(pStreamTask->status.pSM, TASK_EVENT_HALT);
+      if (code != TSDB_CODE_SUCCESS) {
+        stError("s-task:%s halt stream task:%s failed, code:%s not transfer state to stream task", pTask->id.idStr,
+                pStreamTask->id.idStr, tstrerror(code));
+        streamMetaReleaseTask(pMeta, pStreamTask);
+        return code;
+      } else {
+        stDebug("s-task:%s halt by related fill-history task:%s", pStreamTask->id.idStr, pTask->id.idStr);
+      }
 
+      streamTaskOpenAllUpstreamInput(pStreamTask);
+      streamTaskSendCheckpointReq(pStreamTask);
+      streamMetaReleaseTask(pMeta, pStreamTask);
     }
   }
 
   return code;
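The streamExec hunks above rework the end of the fill-history procedure. Previously the fill-history task resumed the stream task and dropped itself immediately; now it halts the related stream task, transfers the executor state, and asks the mnode for a checkpoint via streamTaskSendCheckpointReq, and the fill-history task is dropped only in streamTaskBuildCheckpoint once that checkpoint succeeds (the dropRelHTask branch earlier in this diff). Below is a condensed, standalone sketch of the new sequencing; every name in it is a stand-in.

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { ST_READY, ST_HALT, ST_CK } State;

    typedef struct {
      State state;
      State prev;
      bool  hasFillHistory;
    } Task;

    /* fill-history side: halt the stream task, hand over state, request a checkpoint */
    static void transferState(Task *stream) {
      stream->prev = stream->state;
      stream->state = ST_HALT;            /* TASK_EVENT_HALT */
      printf("state transferred, checkpoint requested from mnode\n");
    }

    /* stream-task side: checkpoint triggered while halted; drop helper once done */
    static void buildCheckpoint(Task *stream) {
      bool dropRelHTask = (stream->prev == ST_HALT);
      stream->state = ST_READY;           /* TASK_EVENT_CHECKPOINT_DONE */
      if (dropRelHTask && stream->hasFillHistory) {
        stream->hasFillHistory = false;   /* drop fill-history task only now */
        printf("fill-history task dropped after checkpoint\n");
      }
    }

    int main(void) {
      Task t = {ST_READY, ST_READY, true};
      transferState(&t);
      t.prev = t.state; t.state = ST_CK;  /* the HALT -> CK transition added to the SM table */
      buildCheckpoint(&t);
      return 0;
    }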
@@ -718,7 +706,8 @@ bool streamTaskReadyToRun(const SStreamTask* pTask, char** pStatus) {
     return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK ||
             st == TASK_STATUS__PAUSE || st == TASK_STATUS__HALT);
   } else {
-    return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK);
+    return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK ||
+            st == TASK_STATUS__HALT);
   }
 }
 
@@ -771,8 +760,7 @@ static int32_t schedTaskInFuture(SStreamTask* pTask) {
           pTask->status.schedIdleTime, ref);
 
   // add one ref count for task
-  // todo this may be failed, and add ref may be failed.
-  SStreamTask* pAddRefTask = streamMetaAcquireTask(pTask->pMeta, pTask->id.streamId, pTask->id.taskId);
+  /*SStreamTask* pAddRefTask = */streamMetaAcquireOneTask(pTask);
 
   if (pTask->schedInfo.pIdleTimer == NULL) {
     pTask->schedInfo.pIdleTimer = taosTmrStart(doStreamExecTaskHelper, pTask->status.schedIdleTime, pTask, streamTimer);
@@ -788,21 +776,13 @@ int32_t streamResumeTask(SStreamTask* pTask) {
   const char* id = pTask->id.idStr;
 
   while (1) {
-    /*int32_t code = */doStreamExecTask(pTask);
+    /*int32_t code = */ doStreamExecTask(pTask);
     taosThreadMutexLock(&pTask->lock);
 
-    // check if this task needs to be idle for a while
-    if (pTask->status.schedIdleTime > 0) {
-      schedTaskInFuture(pTask);
-
-      taosThreadMutexUnlock(&pTask->lock);
-      setLastExecTs(pTask, taosGetTimestampMs());
-      return 0;
-    } else {
     int32_t numOfItems = streamQueueGetNumOfItems(pTask->inputq.queue);
 
     if ((numOfItems == 0) || streamTaskShouldStop(pTask) || streamTaskShouldPause(pTask)) {
       atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
+      clearTaskSchedInfo(pTask);
       taosThreadMutexUnlock(&pTask->lock);
 
       setLastExecTs(pTask, taosGetTimestampMs());
@@ -812,6 +792,14 @@ int32_t streamResumeTask(SStreamTask* pTask) {
             pTask->status.schedStatus, pTask->status.lastExecTs);
 
       return 0;
+    } else {
+      // check if this task needs to be idle for a while
+      if (pTask->status.schedIdleTime > 0) {
+        schedTaskInFuture(pTask);
+
+        taosThreadMutexUnlock(&pTask->lock);
+        setLastExecTs(pTask, taosGetTimestampMs());
+        return 0;
+      }
     }
   }
 
@@ -257,8 +257,6 @@ int32_t streamTaskSetDb(SStreamMeta* pMeta, void* arg, char* key) {
 
     STaskDbWrapper* pBackend = *ppBackend;
     pBackend->pMeta = pMeta;
-
-    pTask->backendRefId = pBackend->refId;
     pTask->pBackend = pBackend;
 
     taosThreadMutexUnlock(&pMeta->backendMutex);
@@ -283,7 +281,6 @@ int32_t streamTaskSetDb(SStreamMeta* pMeta, void* arg, char* key) {
   }
 
   int64_t tref = taosAddRef(taskDbWrapperId, pBackend);
-  pTask->backendRefId = tref;
   pTask->pBackend = pBackend;
   pBackend->refId = tref;
   pBackend->pTask = pTask;
@@ -467,7 +464,6 @@ void streamMetaClear(SStreamMeta* pMeta) {
   }
 
   taosRemoveRef(streamBackendId, pMeta->streamBackendRid);
-
   taosHashClear(pMeta->pTasksMap);
 
   taosArrayClear(pMeta->pTaskList);
@@ -505,7 +501,9 @@ void streamMetaCloseImpl(void* arg) {
     return;
   }
 
+  streamMetaWLock(pMeta);
   streamMetaClear(pMeta);
+  streamMetaWUnLock(pMeta);
 
   tdbAbort(pMeta->db, pMeta->txn);
   tdbTbClose(pMeta->pTaskDb);
@@ -519,7 +517,6 @@ void streamMetaCloseImpl(void* arg) {
   taosHashCleanup(pMeta->pTasksMap);
   taosHashCleanup(pMeta->pTaskDbUnique);
   taosHashCleanup(pMeta->pUpdateTaskSet);
-  // taosHashCleanup(pMeta->pTaskBackendUnique);
   taosHashCleanup(pMeta->updateInfo.pTasks);
   taosHashCleanup(pMeta->startInfo.pReadyTaskSet);
   taosHashCleanup(pMeta->startInfo.pFailedTaskSet);
@@ -534,6 +531,8 @@ void streamMetaCloseImpl(void* arg) {
   bkdMgtDestroy(pMeta->bkdChkptMgt);
 
   pMeta->role = NODE_ROLE_UNINIT;
+  taosThreadRwlockDestroy(&pMeta->lock);
+
   taosMemoryFree(pMeta);
   stDebug("end to close stream meta");
 }
@@ -647,6 +646,12 @@ SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t
   return p;
 }
 
+SStreamTask* streamMetaAcquireOneTask(SStreamTask* pTask) {
+  int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
+  stTrace("s-task:%s acquire task, ref:%d", pTask->id.idStr, ref);
+  return pTask;
+}
+
 void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) {
   int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1);
   if (ref > 0) {
@@ -724,14 +729,16 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
     pTask = *ppTask;
 
     // it is an fill-history task, remove the related stream task's id that points to it
-    if (pTask->info.fillHistory == 1) {
-      streamTaskClearHTaskAttr(pTask);
-    } else {
-      atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1);
-    }
+    atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1);
+    if (pTask->info.fillHistory == 1) {
+      streamTaskClearHTaskAttr(pTask, false);
+    }
 
     taosHashRemove(pMeta->pTasksMap, &id, sizeof(id));
     doRemoveIdFromList(pMeta, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id);
+    streamMetaRemoveTask(pMeta, &id);
+
+    streamMetaWUnLock(pMeta);
 
     ASSERT(pTask->status.timerActive == 0);
 
@@ -742,13 +749,12 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
       streamMetaReleaseTask(pMeta, pTask);
     }
 
-    streamMetaRemoveTask(pMeta, &id);
     streamMetaReleaseTask(pMeta, pTask);
   } else {
     stDebug("vgId:%d failed to find the task:0x%x, it may have been dropped already", pMeta->vgId, taskId);
+    streamMetaWUnLock(pMeta);
   }
 
-  streamMetaWUnLock(pMeta);
   return 0;
 }
@@ -1269,11 +1275,11 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) {
 
   // wait for the stream meta hb function stopping
   if (pMeta->role == NODE_ROLE_LEADER) {
-    // pMeta->pHbInfo->stopFlag = STREAM_META_WILL_STOP;
-    // while (pMeta->pHbInfo->stopFlag != STREAM_META_OK_TO_STOP) {
-    //   taosMsleep(100);
-    //   stDebug("vgId:%d wait for meta to stop timer", pMeta->vgId);
-    // }
+    pMeta->pHbInfo->stopFlag = STREAM_META_WILL_STOP;
+    while (pMeta->pHbInfo->stopFlag != STREAM_META_OK_TO_STOP) {
+      taosMsleep(100);
+      stDebug("vgId:%d wait for meta to stop timer", pMeta->vgId);
+    }
   }
 
   stDebug("vgId:%d start to check all tasks", vgId);
@@ -1306,28 +1312,28 @@ void streamMetaResetStartInfo(STaskStartInfo* pStartInfo) {
 }
 
 void streamMetaRLock(SStreamMeta* pMeta) {
-  // stTrace("vgId:%d meta-rlock", pMeta->vgId);
+  stTrace("vgId:%d meta-rlock", pMeta->vgId);
   taosThreadRwlockRdlock(&pMeta->lock);
 }
 
 void streamMetaRUnLock(SStreamMeta* pMeta) {
-  // stTrace("vgId:%d meta-runlock", pMeta->vgId);
+  stTrace("vgId:%d meta-runlock", pMeta->vgId);
   int32_t code = taosThreadRwlockUnlock(&pMeta->lock);
   if (code != TSDB_CODE_SUCCESS) {
     stError("vgId:%d meta-runlock failed, code:%d", pMeta->vgId, code);
   } else {
-    // stDebug("vgId:%d meta-runlock completed", pMeta->vgId);
+    stDebug("vgId:%d meta-runlock completed", pMeta->vgId);
   }
 }
 
 void streamMetaWLock(SStreamMeta* pMeta) {
-  // stTrace("vgId:%d meta-wlock", pMeta->vgId);
+  stTrace("vgId:%d meta-wlock", pMeta->vgId);
   taosThreadRwlockWrlock(&pMeta->lock);
-  // stTrace("vgId:%d meta-wlock completed", pMeta->vgId);
+  stTrace("vgId:%d meta-wlock completed", pMeta->vgId);
 }
 
 void streamMetaWUnLock(SStreamMeta* pMeta) {
-  // stTrace("vgId:%d meta-wunlock", pMeta->vgId);
+  stTrace("vgId:%d meta-wunlock", pMeta->vgId);
   taosThreadRwlockUnlock(&pMeta->lock);
 }
@@ -1054,6 +1054,24 @@ int32_t tDecodeStreamTaskCheckRsp(SDecoder* pDecoder, SStreamTaskCheckRsp* pRsp)
   return 0;
 }
 
+int32_t tEncodeStreamTaskCheckpointReq(SEncoder* pEncoder, const SStreamTaskCheckpointReq* pReq) {
+  if (tStartEncode(pEncoder) < 0) return -1;
+  if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
+  if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
+  if (tEncodeI32(pEncoder, pReq->nodeId) < 0) return -1;
+  tEndEncode(pEncoder);
+  return 0;
+}
+
+int32_t tDecodeStreamTaskCheckpointReq(SDecoder* pDecoder, SStreamTaskCheckpointReq* pReq) {
+  if (tStartDecode(pDecoder) < 0) return -1;
+  if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pReq->nodeId) < 0) return -1;
+  tEndDecode(pDecoder);
+  return 0;
+}
+
 int32_t tEncodeStreamScanHistoryFinishReq(SEncoder* pEncoder, const SStreamScanHistoryFinishReq* pReq) {
   if (tStartEncode(pEncoder) < 0) return -1;
   if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
@@ -733,20 +733,30 @@ int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask) {
   return status;
 }
 
-int32_t streamTaskClearHTaskAttr(SStreamTask* pTask) {
+int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, bool metaLock) {
   SStreamMeta* pMeta = pTask->pMeta;
+  STaskId      sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId};
   if (pTask->info.fillHistory == 0) {
-    return TSDB_CODE_SUCCESS;
+    return 0;
   }
 
-  STaskId sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId};
-  SStreamTask** ppStreamTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &sTaskId, sizeof(sTaskId));
+  if (metaLock) {
+    streamMetaWLock(pTask->pMeta);
+  }
+
+  SStreamTask** ppStreamTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &sTaskId, sizeof(sTaskId));
   if (ppStreamTask != NULL) {
-    CLEAR_RELATED_FILLHISTORY_TASK((*ppStreamTask));
-    streamMetaSaveTask(pMeta, *ppStreamTask);
     stDebug("s-task:%s clear the related stream task:0x%x attr to fill-history task", pTask->id.idStr,
             (int32_t)sTaskId.taskId);
+
+    taosThreadMutexLock(&(*ppStreamTask)->lock);
+    CLEAR_RELATED_FILLHISTORY_TASK((*ppStreamTask));
+    streamMetaSaveTask(pMeta, *ppStreamTask);
+    taosThreadMutexUnlock(&(*ppStreamTask)->lock);
+  }
+
+  if (metaLock) {
+    streamMetaWUnLock(pTask->pMeta);
   }
 
   return TSDB_CODE_SUCCESS;
@@ -852,3 +862,41 @@ void streamTaskResume(SStreamTask* pTask) {
 bool streamTaskIsSinkTask(const SStreamTask* pTask) {
   return pTask->info.taskLevel == TASK_LEVEL__SINK;
 }
+
+int32_t streamTaskSendCheckpointReq(SStreamTask* pTask) {
+  int32_t     code;
+  int32_t     tlen = 0;
+  int32_t     vgId = pTask->pMeta->vgId;
+  const char* id = pTask->id.idStr;
+
+  SStreamTaskCheckpointReq req = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId, .nodeId = vgId};
+  tEncodeSize(tEncodeStreamTaskCheckpointReq, &req, tlen, code);
+  if (code < 0) {
+    stError("s-task:%s vgId:%d encode stream task req checkpoint failed, code:%s", id, vgId, tstrerror(code));
+    return -1;
+  }
+
+  void* buf = rpcMallocCont(tlen);
+  if (buf == NULL) {
+    stError("s-task:%s vgId:%d encode stream task req checkpoint msg failed, code:%s", id, vgId,
+            tstrerror(TSDB_CODE_OUT_OF_MEMORY));
+    return -1;
+  }
+
+  SEncoder encoder;
+  tEncoderInit(&encoder, buf, tlen);
+  if ((code = tEncodeStreamTaskCheckpointReq(&encoder, &req)) < 0) {
+    rpcFreeCont(buf);
+    stError("s-task:%s vgId:%d encode stream task req checkpoint msg failed, code:%s", id, vgId, tstrerror(code));
+    return -1;
+  }
+  tEncoderClear(&encoder);
+
+  SRpcMsg msg = {.info.noResp = 1};
+  initRpcMsg(&msg, TDMT_MND_STREAM_REQ_CHKPT, buf, tlen);
+
+  stDebug("s-task:%s vgId:%d build and send task checkpoint req", id, vgId);
+
+  tmsgSendReq(&pTask->info.mnodeEpset, &msg);
+  return 0;
+}
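streamTaskSendCheckpointReq above is the sender side of the new TDMT_MND_STREAM_REQ_CHKPT message: size the payload with tEncodeSize, encode into an rpc buffer, and fire a no-response request at the mnode epset. The HALT-to-CK transition added to the state machine in the next hunks lets the mnode's subsequent checkpoint-source request be accepted by the halted task. A standalone sketch of the same size-then-encode-then-send shape follows; the helpers here are stand-ins, not the TDengine rpc API.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { int64_t streamId; int32_t taskId; int32_t nodeId; } CkptReq;

    static int encodeReq(uint8_t *buf, const CkptReq *req) {
      int pos = 0;
      memcpy(buf + pos, &req->streamId, 8); pos += 8;
      memcpy(buf + pos, &req->taskId, 4);   pos += 4;
      memcpy(buf + pos, &req->nodeId, 4);   pos += 4;
      return pos;
    }

    /* stand-in for tmsgSendReq: a fire-and-forget send, no response expected */
    static void sendNoResp(const uint8_t *buf, int len) {
      (void)buf;
      printf("sent %d-byte checkpoint request to mnode\n", len);
    }

    int main(void) {
      CkptReq req = {7, 101, 2};
      int tlen = 16; /* size pass first, allocate second */
      uint8_t *buf = malloc(tlen);
      int n = encodeReq(buf, &req);
      sendNoResp(buf, n);
      free(buf);
      return 0;
    }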
@@ -31,9 +31,13 @@ SStreamTaskState StreamTaskStatusList[9] = {
     {.state = TASK_STATUS__HALT, .name = "halt"},
     {.state = TASK_STATUS__PAUSE, .name = "paused"},
     {.state = TASK_STATUS__CK, .name = "checkpoint"},
-    // {.state = TASK_STATUS__STREAM_SCAN_HISTORY, .name = "stream-scan-history"},
 };
 
+typedef struct SStreamEventInfo {
+  EStreamTaskEvent event;
+  const char*      name;
+} SStreamEventInfo;
+
 SStreamEventInfo StreamTaskEventList[12] = {
     {.event = 0, .name = ""},  // dummy event, place holder
     {.event = TASK_EVENT_INIT, .name = "initialize"},
@@ -94,7 +98,9 @@ int32_t streamTaskSendTransSuccessMsg(SStreamTask* pTask) {
 }
 
 int32_t streamTaskKeepCurrentVerInWal(SStreamTask* pTask) {
-  ASSERT(HAS_RELATED_FILLHISTORY_TASK(pTask));
+  if (!HAS_RELATED_FILLHISTORY_TASK(pTask)) {
+    stError("s-task:%s no related fill-history task, since it may have been dropped already", pTask->id.idStr);
+  }
 
   if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
     pTask->hTaskInfo.haltVer = walReaderGetCurrentVer(pTask->exec.pWalReader);
@@ -402,6 +408,10 @@ SStreamTaskState* streamTaskGetStatus(const SStreamTask* pTask) {
   return &pTask->status.pSM->current;  // copy one obj in case of multi-thread environment
 }
 
+ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask) {
+  return pTask->status.pSM->prev.state.state;
+}
+
 const char* streamTaskGetStatusStr(ETaskStatus status) {
   return StreamTaskStatusList[status].name;
 }
@@ -497,6 +507,8 @@ void doInitStateTransferTable(void) {
   // checkpoint related event
   trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, streamTaskDoCheckpoint, NULL, true);
   taosArrayPush(streamTaskSMTrans, &trans);
+  trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, streamTaskDoCheckpoint, NULL, true);
+  taosArrayPush(streamTaskSMTrans, &trans);
   trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__READY, TASK_EVENT_CHECKPOINT_DONE, NULL, NULL, NULL, true);
   taosArrayPush(streamTaskSMTrans, &trans);
@@ -305,7 +305,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) {
 }
 
 int32_t walSkipFetchBody(SWalReader *pRead) {
-  wDebug("vgId:%d, skip fetch body:%" PRId64 ", first:%" PRId64 ", commit:%" PRId64 ", last:%" PRId64
+  wDebug("vgId:%d, skip:%" PRId64 ", first:%" PRId64 ", commit:%" PRId64 ", last:%" PRId64
         ", applied:%" PRId64 ", 0x%" PRIx64,
         pRead->pWal->cfg.vgId, pRead->pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer,
         pRead->pWal->vers.lastVer, pRead->pWal->vers.appliedVer, pRead->readerId);
@@ -52,12 +52,12 @@ class TDTestCase(TBase):
 
         tdLog.printNoPrefix("==========step3:fill data")
 
-        tdSql.query(f"select first(point_value) as pointValue from {dbname}.{tbname} where wstart between '2023-12-26 10:35:00' and '2023-12-26 10:40:00' interval(1M) fill(prev) order by wstart desc limit 100")
+        sql = f"select first(point_value) as pointValue from {dbname}.{tbname} where wstart between '2023-12-26 10:35:00' and '2023-12-26 10:40:00' interval(1M) fill(prev) order by wstart desc limit 100"
         data = []
         for i in range(6):
             row = [5]
             data.append(row)
-        tdSql.checkDataMem(data)
+        tdSql.checkDataMem(sql, data)
 
     def stop(self):
         tdSql.close()
@@ -236,6 +236,165 @@ class TDTestCase(TBase):
                 if int(reals[k]) != v:
                     tdLog.exit(f"distribute {k} expect: {v} real: {reals[k]}")
 
+    def checkNull(self):
+        # abs unique concat_ws
+        ts = self.start_timestamp + 1
+        sql = f"insert into {self.db}.d0(ts) values({ts})"
+        tdSql.execute(sql)
+        sql = f'''select abs(fc),
+                    unique(ic),
+                    concat_ws(',',bin,nch),
+                    timetruncate(bi,1s,0),
+                    timediff(ic,bi,1s),
+                    to_timestamp(nch,'yyyy-mm-dd hh:mi:ss.ms.us.ns')
+                from {self.db}.d0 where ts={ts}'''
+        tdSql.query(sql)
+        tdSql.checkData(0, 0, "None")
+        tdSql.checkData(0, 1, "None")
+        tdSql.checkData(0, 2, "None")
+        tdSql.checkData(0, 3, "None")
+        tdSql.checkData(0, 4, "None")
+
+        # substr from 0 start
+        sql1 = f"select substr(bin,1) from {self.db}.d0 order by ts desc limit 100"
+        sql2 = f"select bin from {self.db}.d0 order by ts desc limit 100"
+        self.checkSameResult(sql1, sql2)
+        # substr error input pos is zero
+        sql = f"select substr(bin,0,3) from {self.db}.d0 order by ts desc limit 100"
+        tdSql.error(sql)
+
+        # cast
+        nch = 99
+        sql = f"insert into {self.db}.d0(ts, nch) values({ts}, '{nch}')"
+        tdSql.execute(sql)
+        sql = f"select cast(nch as tinyint), \
+                cast(nch as tinyint unsigned), \
+                cast(nch as smallint), \
+                cast(nch as smallint unsigned), \
+                cast(nch as int unsigned), \
+                cast(nch as bigint unsigned), \
+                cast(nch as float), \
+                cast(nch as double), \
+                cast(nch as bool) \
+                from {self.db}.d0 where ts={ts}"
+        row = [nch, nch, nch, nch, nch, nch, nch, nch, True]
+        tdSql.checkDataMem(sql, [row])
+
+        # cast string is zero
+        ts += 1
+        sql = f"insert into {self.db}.d0(ts, nch) values({ts}, 'abcd')"
+        tdSql.execute(sql)
+        sql = f"select cast(nch as tinyint) from {self.db}.d0 where ts={ts}"
+        tdSql.checkFirstValue(sql, 0)
+
+        # iso8601
+        sql = f'select ts,to_iso8601(ts,"Z"),to_iso8601(ts,"+08"),to_iso8601(ts,"-08") from {self.db}.d0 where ts={self.start_timestamp}'
+        row = ['2023-11-15 06:13:20.000','2023-11-14T22:13:20.000Z','2023-11-15T06:13:20.000+08','2023-11-14T14:13:20.000-08']
+        tdSql.checkDataMem(sql, [row])
+
+        # constant expr function
+
+        # count
+        sql = f"select count(1),count(null) from {self.db}.d0"
+        tdSql.checkDataMem(sql, [[self.insert_rows+2, 0]])
+
+        row = [10, 11.0, "None", 2]
+        # sum
+        sql = "select sum(1+9),sum(1.1 + 9.9),sum(null),sum(4/2);"
+        tdSql.checkDataMem(sql, [row])
+        # min
+        sql = "select min(1+9),min(1.1 + 9.9),min(null),min(4/2);"
+        tdSql.checkDataMem(sql, [row])
+        # max
+        sql = "select max(1+9),max(1.1 + 9.9),max(null),max(4/2);"
+        tdSql.checkDataMem(sql, [row])
+        # avg
+        sql = "select avg(1+9),avg(1.1 + 9.9),avg(null),avg(4/2);"
+        tdSql.checkDataMem(sql, [row])
+        # stddev
+        sql = "select stddev(1+9),stddev(1.1 + 9.9),stddev(null),stddev(4/2);"
+        tdSql.checkDataMem(sql, [[0, 0.0, "None", 0]])
+        # leastsquares
+        sql = "select leastsquares(100,2,1), leastsquares(100.2,2.1,1);"
+        tdSql.query(sql)
+        # derivative
+        sql = "select derivative(190999,38.3,1);"
+        tdSql.checkFirstValue(sql, 0.0)
+        # irate
+        sql = "select irate(0);"
+        tdSql.checkFirstValue(sql, 0.0)
+        # diff
+        sql = "select diff(0);"
+        tdSql.checkFirstValue(sql, 0.0)
+        # twa
+        sql = "select twa(10);"
+        tdSql.checkFirstValue(sql, 10.0)
+        # mavg
+        sql = "select mavg(5,10);"
+        tdSql.checkFirstValue(sql, 5)
+        # mavg
+        sql = "select mavg(5,10);"
+        tdSql.checkFirstValue(sql, 5)
+        # csum
+        sql = "select csum(4+9);"
+        tdSql.checkFirstValue(sql, 13)
+        # tail
+        sql = "select tail(1+9,1),tail(1.1 + 9.9,2),tail(null,3),tail(8/4,3);"
+        tdSql.error(sql)
+        sql = "select tail(4+9, 3);"
+        tdSql.checkFirstValue(sql, 13)
+        sql = "select tail(null, 1);"
+        tdSql.checkFirstValue(sql, "None")
+        # top
+        sql = "select top(4+9, 3);"
+        tdSql.checkFirstValue(sql, 13)
+        sql = "select top(9.9, 3);"
+        tdSql.checkFirstValue(sql, 9.9)
+        sql = "select top(null, 1);"
+        tdSql.error(sql)
+        # bottom
+        sql = "select bottom(4+9, 3);"
+        tdSql.checkFirstValue(sql, 13)
+        sql = "select bottom(9.9, 3);"
+        tdSql.checkFirstValue(sql, 9.9)
+
+        ops  = ['GE', 'GT', 'LE', 'LT', 'EQ', 'NE']
+        vals = [-1, -1, 1, 1, -1, 1]
+        cnt  = len(ops)
+        for i in range(cnt):
+            # statecount
+            sql = f"select statecount(99,'{ops[i]}',100);"
+            tdSql.checkFirstValue(sql, vals[i])
+            sql = f"select statecount(9.9,'{ops[i]}',11.1);"
+            tdSql.checkFirstValue(sql, vals[i])
+            # stateduration
+            sql = f"select stateduration(99,'{ops[i]}',100,1s);"
+            # tdSql.checkFirstValue(sql, vals[i]) bug need fix
+            tdSql.execute(sql)
+            sql = f"select stateduration(9.9,'{ops[i]}',11.1,1s);"
+            # tdSql.checkFirstValue(sql, vals[i]) bug need fix
+            tdSql.execute(sql)
+
+        # histogram check crash
+        sqls = [
+            'select histogram(200,"user_input","[10, 50, 200]",0);',
+            'select histogram(22.2,"user_input","[1.01, 5.01, 200.1]",0);',
+            'select histogram(200,"linear_bin",\'{"start": 0.0,"width": 5.0, "count": 5, "infinity": true}\',0)',
+            'select histogram(200.2,"linear_bin",\'{"start": 0.0,"width": 5.01, "count": 5, "infinity": true}\',0)',
+            'select histogram(200,"log_bin",\'{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}\',0)',
+            'select histogram(200.2,"log_bin",\'{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}\',0)'
+        ]
+        tdSql.executes(sqls)
+        # errors check
+        sql = 'select histogram(200.2,"log_bin",\'start":1.0, "factor: 2.0, "count": 5, "infinity": true}\',0)'
+        tdSql.error(sql)
+        sql = 'select histogram("200.2","log_bin",\'start":1.0, "factor: 2.0, "count": 5, "infinity": true}\',0)'
+        tdSql.error(sql)
+
+        # first last
+        sql = "select first(100-90-1),last(2*5),first(11.1),last(22.2)"
+        tdSql.checkDataMem(sql, [[9, 10, 11.1, 22.2]])
+
     # run
     def run(self):
@@ -253,6 +412,9 @@ class TDTestCase(TBase):
         # do action
         self.doQuery()
 
+        # check null
+        self.checkNull()
+
         tdLog.success(f"{__file__} successfully executed")
@ -254,8 +254,6 @@ class TDSql:
|
||||||
tdLog.info("sql:%s, expected expectErrInfo %s occured" % (sql, expectErrInfo))
|
tdLog.info("sql:%s, expected expectErrInfo %s occured" % (sql, expectErrInfo))
|
||||||
else:
|
else:
|
||||||
tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
|
tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
|
||||||
else:
|
|
||||||
tdLog.info("sql:%s, expect error occured" % (sql))
|
|
||||||
|
|
||||||
return self.error_info
|
return self.error_info
|
||||||
|
|
||||||
|
@ -402,7 +400,14 @@ class TDSql:
|
||||||
args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
|
args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
|
||||||
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
|
||||||
else:
|
else:
|
||||||
if self.res[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc):
|
print(f"{self.res[row][col]}")
|
||||||
|
real = self.res[row][col]
|
||||||
|
if real is None:
|
||||||
|
# none
|
||||||
|
if str(real) == data:
|
||||||
|
if(show):
|
||||||
|
tdLog.info("check successfully")
|
||||||
|
elif real.astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc):
|
||||||
# tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}")
|
# tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}")
|
||||||
if(show):
|
if(show):
|
||||||
tdLog.info("check successfully")
|
tdLog.info("check successfully")
|
||||||
|
@@ -490,7 +495,8 @@ class TDSql:
             if(show):
                 tdLog.info("check successfully")
 
-    def checkDataMem(self, mem):
+    def checkDataMem(self, sql, mem):
+        self.query(sql)
         if not isinstance(mem, list):
             caller = inspect.getframeinfo(inspect.stack()[1][0])
             args = (caller.filename, caller.lineno, self.sql)
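The signature change means checkDataMem now issues the query itself instead of relying on a prior self.query() call. A toy stand-in (class and canned data hypothetical) showing the new flow:

class MockSql:
    def __init__(self, canned):
        self.canned = canned  # maps sql text -> canned result rows
        self.res = []

    def query(self, sql):
        self.res = self.canned[sql]

    def checkDataMem(self, sql, mem):
        self.query(sql)  # new: the query is run internally
        assert isinstance(mem, list), "expected rows must be a list"
        assert self.res == mem, "%s != %s" % (self.res, mem)
        print("check successfully")

sql = "select first(100-90-1),last(2*5)"
MockSql({sql: [[9, 10]]}).checkDataMem(sql, [[9, 10]])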
@@ -506,7 +512,7 @@ class TDSql:
                 self.checkData(row, col, colData)
         tdLog.info("check successfully")
 
-    def checkDataCsv(self, csvfilePath):
+    def checkDataCsv(self, sql, csvfilePath):
         if not isinstance(csvfilePath, str) or len(csvfilePath) == 0:
             caller = inspect.getframeinfo(inspect.stack()[1][0])
             args = (caller.filename, caller.lineno, self.sql, csvfilePath)
@@ -530,7 +536,7 @@ class TDSql:
             tdLog.exit("%s(%d) failed: sql:%s, expect csvfile path:%s, read error:%s" % args)
 
         tdLog.info("read csvfile read successfully")
-        self.checkDataMem(data)
+        self.checkDataMem(sql, data)
 
     # return true or false replace exit, no print out
     def checkRowColNoExit(self, row, col):
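checkDataCsv follows the same pattern: it now takes the sql alongside the csv path and forwards both to checkDataMem once the file is parsed. A compact sketch of the load-then-delegate step, in plain Python:

import csv
import io

def load_expected(csvfile):
    # each csv row becomes one expected result row (all values as strings)
    return [row for row in csv.reader(csvfile)]

expected = load_expected(io.StringIO("9,10\n11.1,22.2\n"))
print(expected)  # [['9', '10'], ['11.1', '22.2']]
# a real checkDataCsv(sql, path) would now call checkDataMem(sql, expected)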
@@ -18,6 +18,7 @@ sql use test;
 sql create table t1(ts timestamp, a int, b int , c int, d double);
 
 sql create stream stream1 trigger at_once fill_history 1 IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s);
+sleep 1000
 
 sql insert into t1 values(1648791213000,1,2,3,1.0);
 sql insert into t1 values(1648791223001,2,2,3,1.1);
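The added sleep 1000 gives the stream task a second to come up before the inserts arrive. Where the Python framework is used instead of sim scripts, the same intent is often expressed as a bounded poll rather than a fixed delay; a sketch (the readiness predicate is hypothetical):

import time

def wait_until(predicate, timeout=10.0, interval=0.5):
    # poll until predicate() is truthy or the deadline passes
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# e.g. wait_until(lambda: stream_task_ready("stream1")) in a real test
print(wait_until(lambda: True))  # trivially True here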
@@ -224,53 +225,53 @@ endi
 
 # row 2
 if $data21 != 1 then
-  print ======$data21
+  print ======$data21, expect 1
   goto loop01
 endi
 
 if $data22 != 1 then
-  print ======$data22
+  print ======$data22 , expect 1
   goto loop01
 endi
 
 if $data23 != 3 then
-  print ======$data23
+  print ======$data23 , expect 3
   goto loop01
 endi
 
 if $data24 != 2 then
-  print ======$data24
+  print ======$data24 , expect 2
   goto loop01
 endi
 
 if $data25 != 3 then
-  print ======$data25
+  print ======$data25 , expect 3
   goto loop01
 endi
 
 # row 3
 if $data31 != 1 then
-  print ======$data31
+  print ======$data31 , expect 1
   goto loop01
 endi
 
 if $data32 != 1 then
-  print ======$data32
+  print ======$data32 , expect 1
   goto loop01
 endi
 
 if $data33 != 4 then
-  print ======$data33
+  print ======$data33 , expect 4
   goto loop01
 endi
 
 if $data34 != 2 then
-  print ======$data34
+  print ======$data34 , expect 2
   goto loop01
 endi
 
 if $data35 != 3 then
-  print ======$data35
+  print ======$data35 , expect 3
   goto loop01
 endi
 
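Each print now carries the expected value next to the actual one, so a failing goto loop01 retry leaves a self-explanatory trace in the CI log. The equivalent pattern in the Python harness, sketched with a hypothetical fetch():

def check_rows(fetch, expected, retries=3):
    for _ in range(retries):
        rows = fetch()
        mismatches = [(i, got, want)
                      for i, (got, want) in enumerate(zip(rows, expected))
                      if got != want]
        if not mismatches:
            return True
        for i, got, want in mismatches:
            # actual value first, expected value alongside, as in the sim edits
            print("====== row %d: %s , expect %s" % (i, got, want))
    return False

print(check_rows(lambda: [1, 1, 3, 2, 3], [1, 1, 3, 2, 3]))  # True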
@@ -224,6 +224,40 @@ class TDTestCase:
             sql = f"select timediff(ts - {val}b, ts1) from st "
             self.checkExpect(sql, val)
 
+        # timetruncate check
+        sql = '''select ts,timetruncate(ts,1u),
+                           timetruncate(ts,1b),
+                           timetruncate(ts,1m),
+                           timetruncate(ts,1h),
+                           timetruncate(ts,1w)
+                    from t0 order by ts desc limit 1;'''
+        tdSql.query(sql)
+        tdSql.checkData(0,1, "2023-03-28 18:40:00.000009000")
+        tdSql.checkData(0,2, "2023-03-28 18:40:00.000009999")
+        tdSql.checkData(0,3, "2023-03-28 18:40:00.000000000")
+        tdSql.checkData(0,4, "2023-03-28 18:00:00.000000000")
+        tdSql.checkData(0,5, "2023-03-23 00:00:00.000000000")
+
+        # timediff
+        sql = '''select ts,timediff(ts,ts+1b,1b),
+                           timediff(ts,ts+1u,1u),
+                           timediff(ts,ts+1a,1a),
+                           timediff(ts,ts+1s,1s),
+                           timediff(ts,ts+1m,1m),
+                           timediff(ts,ts+1h,1h),
+                           timediff(ts,ts+1d,1d),
+                           timediff(ts,ts+1w,1w)
+                    from t0 order by ts desc limit 1;'''
+        tdSql.query(sql)
+        tdSql.checkData(0,1, 1)
+        tdSql.checkData(0,2, 1)
+        tdSql.checkData(0,3, 1)
+        tdSql.checkData(0,4, 1)
+        tdSql.checkData(0,5, 1)
+        tdSql.checkData(0,6, 1)
+        tdSql.checkData(0,7, 1)
+        tdSql.checkData(0,8, 1)
+
     # init
     def init(self, conn, logSql, replicaVar=1):
         seed = time.time() % 10000
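A note on the expected values: the 1w result lands on 2023-03-23, a Thursday, which suggests week truncation is aligned to the Unix epoch (1970-01-01 was a Thursday) rather than to calendar weeks. The truncation itself is plain modulo arithmetic on the nanosecond timestamp; a sketch:

NS = 1_000_000_000  # nanoseconds per second

def truncate_ns(ts_ns, unit_ns):
    # drop everything below the unit boundary
    return ts_ns - ts_ns % unit_ns

ts = 66_245 * NS + 9_999           # 18:24:05 plus sub-second noise, in ns
print(truncate_ns(ts, 60 * NS))    # truncated to the minute
print(truncate_ns(ts, 3600 * NS))  # truncated to the hour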
@@ -218,6 +218,20 @@ class TDTestCase:
             sql = f"select count(ts) from st where timediff(ts - {val}{uint}, ts1) = {usval} "
             self.checkExpect(sql, expectVal)
 
+        # timetruncate check
+        sql = '''select ts,timetruncate(ts,1a),
+                           timetruncate(ts,1s),
+                           timetruncate(ts,1m),
+                           timetruncate(ts,1h),
+                           timetruncate(ts,1w)
+                    from t0 order by ts desc limit 1;'''
+        tdSql.query(sql)
+        tdSql.checkData(0,1, "2023-03-28 18:40:00.009000")
+        tdSql.checkData(0,2, "2023-03-28 18:40:00.000000")
+        tdSql.checkData(0,3, "2023-03-28 18:40:00.000000")
+        tdSql.checkData(0,4, "2023-03-28 18:00:00.000000")
+        tdSql.checkData(0,5, "2023-03-23 00:00:00.000000")
+
     # init
     def init(self, conn, logSql, replicaVar=1):
         seed = time.time() % 10000
@@ -24,6 +24,7 @@ from util.dnodes import tdDnodes
 from util.dnodes import *
 
 class TDTestCase:
+    updatecfgDict = {'debugflag':0,'stdebugFlag': 143 ,"tqDebugflag":135}
 
     def init(self, conn, logSql, replicaVar):
         tdLog.debug("start to execute %s" % __file__)
@@ -103,6 +103,10 @@ class TDTestCase:
             f"select statecount(c1 ,'GT',1) , min(c1) from {dbname}.t1",
             f"select statecount(c1 ,'GT',1) , spread(c1) from {dbname}.t1",
             f"select statecount(c1 ,'GT',1) , diff(c1) from {dbname}.t1",
+            f"select statecount(c1 ,'GTA',1) , diff(c1) from {dbname}.t1",
+            f"select statecount(c1 ,'EQA',1) , diff(c1) from {dbname}.t1",
+            f"select statecount(c1 ,'',1) , diff(c1) from {dbname}.t1",
+            f"select statecount(c1 ,'E',1) , diff(c1) from {dbname}.t1",
         ]
         for error_sql in error_sql_lists:
             tdSql.error(error_sql)
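The four new negative cases all target the operator argument of statecount(). The documented operators are LT, GT, LE, GE, NE and EQ, so anything else ('GTA', 'EQA', '', 'E') must be rejected; a client-side membership check captures the rule:

VALID_OPS = {"LT", "GT", "LE", "GE", "NE", "EQ"}

for op in ("GT", "GTA", "EQA", "", "E"):
    status = "ok" if op.upper() in VALID_OPS else "error"
    print("statecount(c1, '%s', 1) -> %s" % (op, status))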
@@ -6,8 +6,8 @@ from util.cases import *
 from util.common import *
 
 class TDTestCase:
-    updatecfgDict = {'vdebugFlag': 143, 'qdebugflag':135, 'tqdebugflag':135, 'udebugflag':135, 'rpcdebugflag':135,
-                     'asynclog': 0, 'stdebugflag':135}
+    updatecfgDict = {'debugFlag':0, 'vdebugFlag': 143, 'qdebugflag':135, 'tqdebugflag':135, 'udebugflag':135, 'rpcdebugflag':135,
+                     'asynclog': 0, 'stdebugflag':143}
     def init(self, conn, logSql, replicaVar=1):
         self.replicaVar = int(replicaVar)
         tdLog.debug("start to execute %s" % __file__)
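Both config edits in this commit raise stream-module logging (stdebugflag 143) while pinning debugFlag to 0; as far as the flag semantics go, a zero debugFlag appears to leave the per-module flags in charge instead of forcing one level everywhere. Mechanically, updatecfgDict is just a per-case override of the daemon defaults; a toy merge (default values hypothetical):

defaults = {'debugFlag': 131, 'stdebugflag': 135, 'asynclog': 1}
updatecfgDict = {'debugFlag': 0, 'stdebugflag': 143, 'asynclog': 0}

effective = {**defaults, **updatecfgDict}  # per-case values win
print(effective)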