commit d249892b2a
fix:conflicts from 3.0

@@ -3112,7 +3112,7 @@ typedef struct {
 int32_t tSerializeSMDropStreamReq(void* buf, int32_t bufLen, const SMDropStreamReq* pReq);
 int32_t tDeserializeSMDropStreamReq(void* buf, int32_t bufLen, SMDropStreamReq* pReq);
-void tFreeSMDropStreamReq(SMDropStreamReq* pReq);
+void tFreeMDropStreamReq(SMDropStreamReq* pReq);
 
 typedef struct {
 char name[TSDB_STREAM_FNAME_LEN];

@@ -150,19 +150,6 @@ typedef struct {
 int32_t colNum;
 } SMetaStbStats;
 
-// void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
-// int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
-// int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
-// int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
-// bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
-// bool tqCurrentBlockConsumed(const STqReader* pReader);
-// int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
-// bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
-// bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
-// int32_t getTableInfoFromSnapshot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t
-// *uid); SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext *ctx); int32_t setForSnapShot(SSnapContext
-// *ctx, int64_t uid); int32_t destroySnapContext(SSnapContext *ctx);
-
 // clang-format off
 /*-------------------------------------------------new api format---------------------------------------------------*/
 typedef struct TsdReader {

@@ -197,27 +184,6 @@ typedef struct SStoreCacheReader {
 // clang-format on
 
 /*------------------------------------------------------------------------------------------------------------------*/
-/*
-void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
-int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
-int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
-int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
-bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
-bool tqCurrentBlockConsumed(const STqReader* pReader);
-
-int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
-bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
-bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
-
-int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char* idstr);
-STqReader *tqReaderOpen(void *pVnode);
-void tqReaderClose(STqReader *);
-
-int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
-bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
-SWalReader* tqGetWalReader(STqReader* pReader);
-int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);
-*/
 // todo rename
 typedef struct SStoreTqReader {
 struct STqReader* (*tqReaderOpen)();

@@ -281,28 +247,18 @@ typedef struct SStoreMeta {
 
 void* (*storeGetIndexInfo)();
 void* (*getInvertIndex)(void* pVnode);
-int32_t (*getChildTableList)(
-void* pVnode, int64_t suid,
-SArray* list); // support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
-int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList); // vnodeGetStbIdList & vnodeGetAllTableList
+// support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
+int32_t (*getChildTableList)( void* pVnode, int64_t suid, SArray* list);
+int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList);
 void* storeGetVersionRange;
 void* storeGetLastTimestamp;
 
 int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid); // tsdbGetTableSchema
-// db name, vgId, numOfTables, numOfSTables
-int32_t (*getNumOfChildTables)(
-void* pVnode, int64_t uid, int64_t* numOfTables,
-int32_t* numOfCols); // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
-void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
-int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
-// metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
+int32_t (*getNumOfChildTables)( void* pVnode, int64_t uid, int64_t* numOfTables, int32_t* numOfCols);
+void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables, int64_t* numOfNormalTables);
+
 int64_t (*getNumOfRowsInMem)(void* pVnode);
-/**
-int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
-int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
-int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
-*/
 
 SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock);
 int32_t (*resumeCtbCursor)(SMCtbCursor* pCtbCur, int8_t first);
 void (*pauseCtbCursor)(SMCtbCursor* pCtbCur);

@@ -34,17 +34,16 @@ extern "C" {
 #define SIZE_IN_MiB(_v) ((_v) / ONE_MiB_F)
 #define SIZE_IN_KiB(_v) ((_v) / ONE_KiB_F)
 
-#define TASK_DOWNSTREAM_READY 0x0
-#define TASK_DOWNSTREAM_NOT_READY 0x1
-#define TASK_DOWNSTREAM_NOT_LEADER 0x2
-#define TASK_UPSTREAM_NEW_STAGE 0x3
-
-#define NODE_ROLE_UNINIT 0x1
-#define NODE_ROLE_LEADER 0x2
-#define NODE_ROLE_FOLLOWER 0x3
-
-#define HAS_RELATED_FILLHISTORY_TASK(_t) ((_t)->hTaskInfo.id.taskId != 0)
+#define TASK_DOWNSTREAM_READY 0x0
+#define TASK_DOWNSTREAM_NOT_READY 0x1
+#define TASK_DOWNSTREAM_NOT_LEADER 0x2
+#define TASK_UPSTREAM_NEW_STAGE 0x3
+
+#define NODE_ROLE_UNINIT 0x1
+#define NODE_ROLE_LEADER 0x2
+#define NODE_ROLE_FOLLOWER 0x3
+#define HAS_RELATED_FILLHISTORY_TASK(_t) ((_t)->hTaskInfo.id.taskId != 0)
 #define CLEAR_RELATED_FILLHISTORY_TASK(_t) \
 do { \
 (_t)->hTaskInfo.id.taskId = 0; \
@@ -163,7 +162,7 @@ typedef struct {
 typedef struct {
 int8_t type;
 int64_t ver;
-SArray* submits; // SArray<SPackedSubmit>
+SArray* submits; // SArray<SPackedSubmit>
 } SStreamMergedSubmit;
 
 typedef struct {

@@ -255,7 +254,7 @@ typedef struct {
 } SScanhistoryDataInfo;
 
 typedef struct {
-int32_t idleDuration; // idle time before use time slice the continue execute scan-history
+int32_t idleDuration; // idle time before use time slice the continue execute scan-history
 int32_t numOfTicks;
 tmr_h pTimer;
 int32_t execCount;

@@ -305,10 +304,11 @@ typedef struct SStreamTaskId {
 typedef struct SCheckpointInfo {
 int64_t startTs;
 int64_t checkpointId;
-int64_t checkpointVer; // latest checkpointId version
-int64_t processedVer; // already processed ver, that has generated results version.
-int64_t nextProcessVer; // current offset in WAL, not serialize it
-int64_t failedId; // record the latest failed checkpoint id
+int64_t checkpointVer; // latest checkpointId version
+int64_t processedVer; // already processed ver, that has generated results version.
+int64_t nextProcessVer; // current offset in WAL, not serialize it
+int64_t failedId; // record the latest failed checkpoint id
+bool dispatchCheckpointTrigger;
 } SCheckpointInfo;
 
 typedef struct SStreamStatus {

@@ -335,22 +335,22 @@ typedef struct SSTaskBasicInfo {
 int32_t selfChildId;
 int32_t totalLevel;
 int8_t taskLevel;
-int8_t fillHistory; // is fill history task or not
-int64_t triggerParam; // in msec
+int8_t fillHistory; // is fill history task or not
+int64_t triggerParam; // in msec
 } SSTaskBasicInfo;
 
 typedef struct SStreamDispatchReq SStreamDispatchReq;
-typedef struct STokenBucket STokenBucket;
-typedef struct SMetaHbInfo SMetaHbInfo;
+typedef struct STokenBucket STokenBucket;
+typedef struct SMetaHbInfo SMetaHbInfo;
 
 typedef struct SDispatchMsgInfo {
-SStreamDispatchReq* pData; // current dispatch data
-int8_t dispatchMsgType;
-int16_t msgType; // dispatch msg type
-int32_t retryCount; // retry send data count
-int64_t startTs; // dispatch start time, record total elapsed time for dispatch
-SArray* pRetryList; // current dispatch successfully completed node of downstream
-void* pTimer; // used to dispatch data after a given time duration
+SStreamDispatchReq* pData; // current dispatch data
+int8_t dispatchMsgType;
+int16_t msgType; // dispatch msg type
+int32_t retryCount; // retry send data count
+int64_t startTs; // dispatch start time, record total elapsed time for dispatch
+SArray* pRetryList; // current dispatch successfully completed node of downstream
+void* pTimer; // used to dispatch data after a given time duration
 } SDispatchMsgInfo;
 
 typedef struct STaskQueue {

@@ -359,8 +359,8 @@ typedef struct STaskQueue {
 } STaskQueue;
 
 typedef struct STaskSchedInfo {
-int8_t status;
-void* pTimer;
+int8_t status;
+void* pTimer;
 } STaskSchedInfo;
 
 typedef struct SSinkRecorder {

@@ -394,7 +394,7 @@ typedef struct SHistoryTaskInfo {
 int32_t tickCount;
 int32_t retryTimes;
 int32_t waitInterval;
-int64_t haltVer; // offset in wal when halt the stream task
+int64_t haltVer; // offset in wal when halt the stream task
 } SHistoryTaskInfo;
 
 typedef struct STaskOutputInfo {

@@ -460,9 +460,9 @@ typedef struct STaskStartInfo {
 int64_t startTs;
 int64_t readyTs;
 int32_t tasksWillRestart;
-int32_t taskStarting; // restart flag, sentinel to guard the restart procedure.
-SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing
-SHashObj* pFailedTaskSet; // tasks that are done the check downstream process, may be successful or failed
+int32_t taskStarting; // restart flag, sentinel to guard the restart procedure.
+SHashObj* pReadyTaskSet; // tasks that are all ready for running stream processing
+SHashObj* pFailedTaskSet; // tasks that are done the check downstream process, may be successful or failed
 int64_t elapsedTime;
 } STaskStartInfo;

@@ -535,7 +535,7 @@ struct SStreamDispatchReq {
 int64_t stage; // nodeId from upstream task
 int64_t streamId;
 int32_t taskId;
-int32_t msgId; // msg id to identify if the incoming msg from the same sender
+int32_t msgId; // msg id to identify if the incoming msg from the same sender
 int32_t srcVgId;
 int32_t upstreamTaskId;
 int32_t upstreamChildId;

@@ -661,22 +661,22 @@ typedef struct STaskStatusEntry {
 int32_t status;
 int64_t stage;
 int32_t nodeId;
-int64_t verStart; // start version in WAL, only valid for source task
-int64_t verEnd; // end version in WAL, only valid for source task
-int64_t processedVer; // only valid for source task
-int64_t activeCheckpointId; // current active checkpoint id
-bool checkpointFailed; // denote if the checkpoint is failed or not
-double inputQUsed; // in MiB
+int64_t verStart; // start version in WAL, only valid for source task
+int64_t verEnd; // end version in WAL, only valid for source task
+int64_t processedVer; // only valid for source task
+int64_t activeCheckpointId; // current active checkpoint id
+bool checkpointFailed; // denote if the checkpoint is failed or not
+double inputQUsed; // in MiB
 double inputRate;
-double sinkQuota; // existed quota size for sink task
-double sinkDataSize; // sink to dest data size
+double sinkQuota; // existed quota size for sink task
+double sinkDataSize; // sink to dest data size
 } STaskStatusEntry;
 
 typedef struct SStreamHbMsg {
 int32_t vgId;
 int32_t numOfTasks;
-SArray* pTaskStatus; // SArray<STaskStatusEntry>
-SArray* pUpdateNodes; // SArray<int32_t>, needs update the epsets in stream tasks for those nodes.
+SArray* pTaskStatus; // SArray<STaskStatusEntry>
+SArray* pUpdateNodes; // SArray<int32_t>, needs update the epsets in stream tasks for those nodes.
 } SStreamHbMsg;
 
 int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp);

@@ -700,7 +700,7 @@ typedef struct SNodeUpdateInfo {
 } SNodeUpdateInfo;
 
 typedef struct SStreamTaskNodeUpdateMsg {
-int32_t transId; // to identify the msg
+int32_t transId; // to identify the msg
 int64_t streamId;
 int32_t taskId;
 SArray* pNodeList; // SArray<SNodeUpdateInfo>

@@ -757,12 +757,13 @@ const char* streamTaskGetStatusStr(ETaskStatus status);
 void streamTaskResetStatus(SStreamTask* pTask);
 void streamTaskSetStatusReady(SStreamTask* pTask);
 
-void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen);
+void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen);
 
 // recover and fill history
-void streamTaskCheckDownstream(SStreamTask* pTask);
+void streamTaskCheckDownstream(SStreamTask* pTask);
 
-int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage, int64_t* oldStage);
+int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage,
+int64_t* oldStage);
 int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
 void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
 bool streamTaskAllUpstreamClosed(SStreamTask* pTask);

@@ -788,18 +789,17 @@ bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer)
 int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);
 
 // common
-int32_t streamRestoreParam(SStreamTask* pTask);
-void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
-void streamTaskResume(SStreamTask* pTask);
-void streamTaskEnablePause(SStreamTask* pTask);
-int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask);
-void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
-void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
-void streamTaskSetFixedDownstreamInfo(SStreamTask* pTask, const SStreamTask* pDownstreamTask);
-int32_t streamTaskReleaseState(SStreamTask* pTask);
-int32_t streamTaskReloadState(SStreamTask* pTask);
-void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
-void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
+int32_t streamRestoreParam(SStreamTask* pTask);
+void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
+void streamTaskResume(SStreamTask* pTask);
+int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask);
+void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
+void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
+void streamTaskSetFixedDownstreamInfo(SStreamTask* pTask, const SStreamTask* pDownstreamTask);
+int32_t streamTaskReleaseState(SStreamTask* pTask);
+int32_t streamTaskReloadState(SStreamTask* pTask);
+void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
+void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
 
 void streamTaskStatusInit(STaskStatusEntry* pEntry, const SStreamTask* pTask);
 void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);

@@ -808,7 +808,7 @@ void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc);
 int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
 int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
 SScanhistoryDataInfo streamScanHistoryData(SStreamTask* pTask, int64_t st);
-int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
+int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
 
 // agg level
 int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, SRpcHandleInfo* pInfo);

@@ -845,7 +845,7 @@ void streamMetaResetStartInfo(STaskStartInfo* pMeta);
 int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
 int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask);
 int32_t streamTaskBuildCheckpoint(SStreamTask* pTask);
-void streamTaskClearCheckInfo(SStreamTask* pTask);
+void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg);
 int32_t streamAlignTransferState(SStreamTask* pTask);
 int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId);
 int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,
@@ -151,8 +151,6 @@ void startRsync(){
 uDebug("[rsync] start server successful");
 }
-
-}
 
 int uploadRsync(char* id, char* path){
 #ifdef WINDOWS
 char pathTransform[PATH_MAX] = {0};

@@ -3590,9 +3590,9 @@ void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_
 tColDataCalcSMAUInt, // TSDB_DATA_TYPE_UINT
 tColDataCalcSMAUBigInt, // TSDB_DATA_TYPE_UBIGINT
 tColDataCalcSMAVarType, // TSDB_DATA_TYPE_JSON
-NULL, // TSDB_DATA_TYPE_VARBINARY
-NULL, // TSDB_DATA_TYPE_DECIMAL
-NULL, // TSDB_DATA_TYPE_BLOB
+tColDataCalcSMAVarType, // TSDB_DATA_TYPE_VARBINARY
+tColDataCalcSMAVarType, // TSDB_DATA_TYPE_DECIMAL
+tColDataCalcSMAVarType, // TSDB_DATA_TYPE_BLOB
 NULL, // TSDB_DATA_TYPE_MEDIUMBLOB
 tColDataCalcSMAVarType // TSDB_DATA_TYPE_GEOMETRY
 };

@@ -513,6 +513,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
 
 tsNumOfTaskQueueThreads = tsNumOfCores / 2;
+tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4);
 
 if (tsNumOfTaskQueueThreads >= 50) {
 tsNumOfTaskQueueThreads = 50;
 }

@@ -1743,4 +1744,4 @@ void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
 uInfo("all debug flag are set to %d", flag);
 }
 
-int8_t taosGranted() { return atomic_load_8(&tsGrant); }
+int8_t taosGranted() { return atomic_load_8(&tsGrant); }
@@ -7152,7 +7152,7 @@ int32_t tDeserializeSMDropStreamReq(void *buf, int32_t bufLen, SMDropStreamReq *
 return 0;
 }
 
-void tFreeSMDropStreamReq(SMDropStreamReq *pReq) {
+void tFreeMDropStreamReq(SMDropStreamReq *pReq) {
 FREESQL();
 }
 

@@ -84,8 +84,10 @@ typedef struct {
 } SVnodeThread;
 
 // vmInt.c
+int32_t vmGetPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId);
 int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId);
 SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId);
+SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict);
 void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode);
 int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl);
 void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal);

@@ -281,8 +281,8 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
 
 vmGenerateWrapperCfg(pMgmt, &req, &wrapperCfg);
 
-SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
-if (pVnode != NULL && !pVnode->failed) {
+SVnodeObj *pVnode = vmAcquireVnodeImpl(pMgmt, req.vgId, false);
+if (pVnode != NULL && (req.replica == 1 || !pVnode->failed)) {
 dError("vgId:%d, already exist", req.vgId);
 tFreeSCreateVnodeReq(&req);
 vmReleaseVnode(pMgmt, pVnode);

@@ -291,10 +291,11 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
 return 0;
 }
 
-ASSERT(pVnode == NULL || pVnode->failed);
-
-wrapperCfg.diskPrimary = pVnode ? pVnode->diskPrimary : vmAllocPrimaryDisk(pMgmt, vnodeCfg.vgId);
-int32_t diskPrimary = wrapperCfg.diskPrimary;
+int32_t diskPrimary = vmGetPrimaryDisk(pMgmt, vnodeCfg.vgId);
+if (diskPrimary < 0) {
+diskPrimary = vmAllocPrimaryDisk(pMgmt, vnodeCfg.vgId);
+}
+wrapperCfg.diskPrimary = diskPrimary;
 
 snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId);
 
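Note: the create-vnode path above now asks for an existing primary disk first and only allocates on a miss, so a vnode re-created after a failure stays on the disk that already holds its data. A minimal sketch of that get-or-alloc idiom, under the assumption that a negative id means "not found" (`lookup_disk`/`alloc_disk` are hypothetical stand-ins, not TDengine APIs):

```c
#include <stdio.h>

/* hypothetical stand-ins for vmGetPrimaryDisk / vmAllocPrimaryDisk */
static int lookup_disk(int vgId) { (void)vgId; return -1; } /* miss in this demo */
static int alloc_disk(int vgId) { return vgId % 2; }        /* trivial allocator */

int get_or_alloc_disk(int vgId) {
  int disk = lookup_disk(vgId); /* reuse the disk of a known (possibly failed) vnode */
  if (disk < 0) {               /* not found: fall back to fresh allocation */
    disk = alloc_disk(vgId);
  }
  return disk;
}

int main(void) {
  printf("vgId 5 -> disk %d\n", get_or_alloc_disk(5));
  return 0;
}
```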
@@ -371,7 +372,7 @@ int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
 TMSG_INFO(pMsg->msgType));
 
 SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
-if (pVnode == NULL || pVnode->failed) {
+if (pVnode == NULL) {
 dError("vgId:%d, failed to alter vnode type since %s", req.vgId, terrstr());
 terrno = TSDB_CODE_VND_NOT_EXIST;
 if (pVnode) vmReleaseVnode(pMgmt, pVnode);

@@ -489,7 +490,7 @@ int32_t vmProcessCheckLearnCatchupReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
 req.vgId, TMSG_INFO(pMsg->msgType));
 
 SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
-if (pVnode == NULL || pVnode->failed) {
+if (pVnode == NULL) {
 dError("vgId:%d, failed to alter vnode type since %s", req.vgId, terrstr());
 terrno = TSDB_CODE_VND_NOT_EXIST;
 if (pVnode) vmReleaseVnode(pMgmt, pVnode);

@@ -532,7 +533,7 @@ int32_t vmProcessDisableVnodeWriteReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
 dInfo("vgId:%d, vnode write disable:%d", req.vgId, req.disable);
 
 SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
-if (pVnode == NULL || pVnode->failed) {
+if (pVnode == NULL) {
 dError("vgId:%d, failed to disable write since %s", req.vgId, terrstr());
 terrno = TSDB_CODE_VND_NOT_EXIST;
 if (pVnode) vmReleaseVnode(pMgmt, pVnode);

@@ -565,7 +566,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
 dInfo("vgId:%d, start to alter vnode hashrange:[%u, %u], dstVgId:%d", req.srcVgId, req.hashBegin, req.hashEnd,
 req.dstVgId);
 pVnode = vmAcquireVnode(pMgmt, srcVgId);
-if (pVnode == NULL || pVnode->failed) {
+if (pVnode == NULL) {
 dError("vgId:%d, failed to alter hashrange since %s", srcVgId, terrstr());
 terrno = TSDB_CODE_VND_NOT_EXIST;
 if (pVnode) vmReleaseVnode(pMgmt, pVnode);

@@ -680,7 +681,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
 }
 
 SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);
-if (pVnode == NULL || pVnode->failed) {
+if (pVnode == NULL) {
 dError("vgId:%d, failed to alter replica since %s", vgId, terrstr());
 terrno = TSDB_CODE_VND_NOT_EXIST;
 if (pVnode) vmReleaseVnode(pMgmt, pVnode);

@@ -748,7 +749,7 @@ int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
 return -1;
 }
 
-SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);
+SVnodeObj *pVnode = vmAcquireVnodeImpl(pMgmt, vgId, false);
 if (pVnode == NULL) {
 dInfo("vgId:%d, failed to drop since %s", vgId, terrstr());
 terrno = TSDB_CODE_VND_NOT_EXIST;

@@ -19,6 +19,19 @@
 #include "vnd.h"
 #include "libs/function/tudf.h"
 
+int32_t vmGetPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
+int32_t diskId = -1;
+SVnodeObj *pVnode = NULL;
+
+taosThreadRwlockRdlock(&pMgmt->lock);
+taosHashGetDup(pMgmt->hash, &vgId, sizeof(int32_t), (void *)&pVnode);
+if (pVnode != NULL) {
+diskId = pVnode->diskPrimary;
+}
+taosThreadRwlockUnlock(&pMgmt->lock);
+return diskId;
+}
+
 int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
 STfs *pTfs = pMgmt->pTfs;
 int32_t diskId = 0;
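Note: the `vmGetPrimaryDisk` added in the hunk above is a read-side lookup under a rwlock: readers can run concurrently, and -1 doubles as the not-found sentinel. A self-contained model of the same pattern on plain pthreads (a fixed array stands in for `taosHash`; illustrative only):

```c
#include <pthread.h>
#include <stdio.h>

typedef struct { int vgId; int diskPrimary; } VnodeObj;

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static VnodeObj vnodes[4] = {{1, 0}, {2, 1}, {3, 0}, {4, 2}};

int get_primary_disk(int vgId) {
  int diskId = -1;              /* -1 signals "vnode not found" */
  pthread_rwlock_rdlock(&lock); /* shared lock: lookups do not block each other */
  for (int i = 0; i < 4; i++) {
    if (vnodes[i].vgId == vgId) { diskId = vnodes[i].diskPrimary; break; }
  }
  pthread_rwlock_unlock(&lock);
  return diskId;
}

int main(void) {
  printf("vgId 3 -> disk %d\n", get_primary_disk(3));
  return 0;
}
```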
@@ -74,12 +87,12 @@ int32_t vmAllocPrimaryDisk(SVnodeMgmt *pMgmt, int32_t vgId) {
 return diskId;
 }
 
-SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) {
+SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict) {
 SVnodeObj *pVnode = NULL;
 
 taosThreadRwlockRdlock(&pMgmt->lock);
 taosHashGetDup(pMgmt->hash, &vgId, sizeof(int32_t), (void *)&pVnode);
-if (pVnode == NULL || pVnode->dropped) {
+if (pVnode == NULL || strict && (pVnode->dropped || pVnode->failed)) {
 terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
 pVnode = NULL;
 } else {

@@ -91,6 +104,8 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) {
 return pVnode;
 }
 
+SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) { return vmAcquireVnodeImpl(pMgmt, vgId, true); }
+
 void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
 if (pVnode == NULL) return;
 
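Note: acquisition is now split into a lenient `vmAcquireVnodeImpl` and a strict `vmAcquireVnode` wrapper, so normal traffic keeps rejecting dropped or failed vnodes while management paths can pass strict=false to reach a failed one. A compilable sketch of that strict/lenient split (illustrative types; `refCount` stands in for the real reference counting):

```c
#include <stdbool.h>
#include <stddef.h>

typedef struct { bool dropped; bool failed; int refCount; } Obj;

Obj *acquire_impl(Obj *o, bool strict) {
  if (o == NULL || (strict && (o->dropped || o->failed))) return NULL;
  o->refCount++; /* caller must pair this with a release */
  return o;
}

/* the default entry point keeps the old, strict semantics */
Obj *acquire(Obj *o) { return acquire_impl(o, true); }

int main(void) {
  Obj o = {false, true, 0};                             /* a failed object */
  bool strictRejects = (acquire(&o) == NULL);           /* strict path refuses it */
  bool lenientSees = (acquire_impl(&o, false) != NULL); /* management path sees it */
  return (strictRejects && lenientSees) ? 0 : 1;
}
```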
@@ -100,6 +115,15 @@ void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
 taosThreadRwlockUnlock(&pMgmt->lock);
 }
 
+static void vmFreeVnodeObj(SVnodeObj **ppVnode) {
+if (!ppVnode || !(*ppVnode)) return;
+
+SVnodeObj *pVnode = *ppVnode;
+taosMemoryFree(pVnode->path);
+taosMemoryFree(pVnode);
+ppVnode[0] = NULL;
+}
+
 int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
 SVnodeObj *pVnode = taosMemoryCalloc(1, sizeof(SVnodeObj));
 if (pVnode == NULL) {
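Note: `vmFreeVnodeObj` frees through a double pointer and nulls the caller's slot, so a stale pointer cannot be freed twice or dereferenced afterwards. The same shape in isolation (illustrative):

```c
#include <stdlib.h>

typedef struct { char *path; } Vnode;

void free_obj(Vnode **pp) {
  if (pp == NULL || *pp == NULL) return; /* already freed: harmless no-op */
  free((*pp)->path);
  free(*pp);
  *pp = NULL; /* defuse the dangling pointer at the call site */
}

int main(void) {
  Vnode *p = calloc(1, sizeof *p);
  if (p != NULL) p->path = calloc(8, 1);
  free_obj(&p);
  free_obj(&p); /* second call is safe: the slot was nulled */
  return (p == NULL) ? 0 : 1;
}
```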
@@ -134,6 +158,12 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
 }
 
 taosThreadRwlockWrlock(&pMgmt->lock);
+SVnodeObj *pOld = NULL;
+taosHashGetDup(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld);
+if (pOld) {
+ASSERT(pOld->failed);
+vmFreeVnodeObj(&pOld);
+}
 int32_t code = taosHashPut(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnodeObj *));
 taosThreadRwlockUnlock(&pMgmt->lock);
 

@@ -223,8 +253,7 @@ _closed:
 vnodeDestroy(pVnode->vgId, path, pMgmt->pTfs);
 }
 
-taosMemoryFree(pVnode->path);
-taosMemoryFree(pVnode);
+vmFreeVnodeObj(&pVnode);
 }
 
 static int32_t vmRestoreVgroupId(SWrapperCfg *pCfg, STfs *pTfs) {

@@ -621,7 +650,7 @@ static void *vmRestoreVnodeInThread(void *param) {
 for (int32_t v = 0; v < pThread->vnodeNum; ++v) {
 SVnodeObj *pVnode = pThread->ppVnodes[v];
 if (pVnode->failed) {
-dError("vgId:%d, skip restoring vnode in failure mode.", pVnode->vgId);
+dError("vgId:%d, cannot restore a vnode in failed mode.", pVnode->vgId);
 continue;
 }
 

@@ -187,7 +187,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
 pHead->vgId = ntohl(pHead->vgId);
 
 SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
-if (pVnode == NULL || pVnode->failed) {
+if (pVnode == NULL) {
 dGDebug("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
 terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
 terrno = (terrno != 0) ? terrno : -1;

@@ -312,7 +312,7 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
 int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
 int32_t size = -1;
 SVnodeObj *pVnode = vmAcquireVnode(pMgmt, vgId);
-if (pVnode != NULL && !pVnode->failed) {
+if (pVnode != NULL) {
 switch (qtype) {
 case WRITE_QUEUE:
 size = taosQueueItemSize(pVnode->pWriteW.queue);
@@ -651,8 +651,7 @@ typedef struct SStreamConf {
 } SStreamConf;
 
 typedef struct {
-char name[TSDB_STREAM_FNAME_LEN];
-// ctl
+char name[TSDB_STREAM_FNAME_LEN];
 SRWLatch lock;
 
 // create info

@@ -22,17 +22,37 @@
 extern "C" {
 #endif
 
-int32_t mndInitStream(SMnode *pMnode);
-void mndCleanupStream(SMnode *pMnode);
+typedef struct SStreamTransInfo {
+int64_t startTime;
+int32_t transId;
+const char *name;
+} SStreamTransInfo;
+
+typedef struct SStreamTransMgmt {
+SHashObj *pDBTrans;
+} SStreamTransMgmt;
+
+typedef struct SStreamExecInfo {
+SArray *pNodeList;
+int64_t ts; // snapshot ts
+SStreamTransMgmt transMgmt;
+int64_t activeCheckpoint; // active check point id
+SHashObj * pTaskMap;
+SArray * pTaskList;
+TdThreadMutex lock;
+} SStreamExecInfo;
+
+extern SStreamExecInfo execInfo;
+
+int32_t mndInitStream(SMnode *pMnode);
+void mndCleanupStream(SMnode *pMnode);
 SStreamObj *mndAcquireStream(SMnode *pMnode, char *streamName);
 void mndReleaseStream(SMnode *pMnode, SStreamObj *pStream);
-int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
-int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
+
+SSdbRaw *mndStreamActionEncode(SStreamObj *pStream);
+SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw);
+
+int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
+int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
+int32_t mndStreamRegisterTrans(STrans* pTrans, const char* pName, const char* pSrcDb, const char* pDstDb);
+bool streamTransConflictOtherTrans(SMnode *pMnode, const char *pSrcDb, const char *pDstDb);
 
 // for sma
 // TODO refactor
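Note: the header now exposes `SStreamExecInfo` with a `transMgmt.pDBTrans` hash keyed by database name: at most one record per DB, so a second stream transaction touching the same DB can be detected as a conflict. A plain-C stand-in for that per-DB registry (a fixed array replaces `taosHash`; illustrative only):

```c
#include <stdio.h>
#include <string.h>

typedef struct { char db[64]; int transId; const char *name; } TransInfo;

static TransInfo reg[16];
static int nreg = 0;

void register_trans(const char *db, int transId, const char *name) {
  for (int i = 0; i < nreg; i++) { /* one record per DB: overwrite if present */
    if (strcmp(reg[i].db, db) == 0) { reg[i].transId = transId; reg[i].name = name; return; }
  }
  if (nreg < 16) { /* otherwise append a new record */
    snprintf(reg[nreg].db, sizeof(reg[nreg].db), "%s", db);
    reg[nreg].transId = transId;
    reg[nreg].name = name;
    nreg++;
  }
}

const TransInfo *find_trans(const char *db) {
  for (int i = 0; i < nreg; i++) {
    if (strcmp(reg[i].db, db) == 0) return &reg[i];
  }
  return NULL;
}

int main(void) {
  register_trans("db1", 7, "stream-checkpoint");
  const TransInfo *t = find_trans("db1");
  if (t != NULL) printf("conflict: trans %d (%s) already owns db1\n", t->transId, t->name);
  return 0;
}
```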
@@ -28,7 +28,7 @@
 extern bool tsDeployOnSnode;
 
 static int32_t doAddSinkTask(SStreamObj* pStream, SArray* pTaskList, SMnode* pMnode, int32_t vgId, SVgObj* pVgroup,
-SEpSet* pEpset, bool isFillhistory);
+SEpSet* pEpset, bool isFillhistory);
 
 int32_t mndConvertRsmaTask(char** pDst, int32_t* pDstLen, const char* ast, int64_t uid, int8_t triggerType,
 int64_t watermark, int64_t deleteMark) {

@@ -34,7 +34,13 @@
 #define MND_STREAM_VER_NUMBER 4
 #define MND_STREAM_RESERVE_SIZE 64
 #define MND_STREAM_MAX_NUM 60
-#define MND_STREAM_CHECKPOINT_NAME "stream-checkpoint"
+
+#define MND_STREAM_CHECKPOINT_NAME "stream-checkpoint"
+#define MND_STREAM_PAUSE_NAME "stream-pause"
+#define MND_STREAM_RESUME_NAME "stream-resume"
+#define MND_STREAM_DROP_NAME "stream-drop"
+#define MND_STREAM_TASK_RESET_NAME "stream-task-reset"
+#define MND_STREAM_TASK_UPDATE_NAME "stream-task-update"
 
 typedef struct SNodeEntry {
 int32_t nodeId;

@@ -43,22 +49,13 @@ typedef struct SNodeEntry {
 int64_t hbTimestamp; // second
 } SNodeEntry;
 
-typedef struct SStreamExecInfo {
-SArray *pNodeList;
-int64_t ts; // snapshot ts
-int64_t activeCheckpoint; // active check point id
-SHashObj * pTaskMap;
-SArray * pTaskList;
-TdThreadMutex lock;
-} SStreamExecInfo;
-
 typedef struct SVgroupChangeInfo {
 SHashObj *pDBMap;
 SArray * pUpdateNodeList; // SArray<SNodeUpdateInfo>
 } SVgroupChangeInfo;
 
-static int32_t mndNodeCheckSentinel = 0;
-static SStreamExecInfo execInfo;
+static int32_t mndNodeCheckSentinel = 0;
+SStreamExecInfo execInfo;
 
 static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream);
 static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream);

@@ -83,17 +80,20 @@ static SArray *mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady);
 
 static SVgroupChangeInfo mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pPrevNodeList, const SArray *pNodeList);
 
-static STrans *doCreateTrans(SMnode *pMnode, SStreamObj *pStream, const char *name);
+static STrans *doCreateTrans(SMnode *pMnode, SStreamObj *pStream, SRpcMsg *pReq, const char *name, const char* pMsg);
 static int32_t mndPersistTransLog(SStreamObj *pStream, STrans *pTrans);
 static void initTransAction(STransAction *pAction, void *pCont, int32_t contLen, int32_t msgType, const SEpSet *pEpset,
 int32_t retryCode);
 static int32_t createStreamUpdateTrans(SStreamObj *pStream, SVgroupChangeInfo *pInfo, STrans *pTrans);
 static void removeStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode);
-static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode);
+static void saveStreamTasksInfo(SStreamObj *pStream, SStreamExecInfo *pExecNode);
 static int32_t removeExpirednodeEntryAndTask(SArray *pNodeSnapshot);
-static int32_t doKillActiveCheckpointTrans(SMnode *pMnode);
+static int32_t killActiveCheckpointTrans(SMnode *pMnode, const char* pDbName, size_t len);
 static int32_t setNodeEpsetExpiredFlag(const SArray *pNodeList);
 
+static SSdbRaw *mndStreamActionEncode(SStreamObj *pStream);
+static SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw);
+
 int32_t mndInitStream(SMnode *pMnode) {
 SSdbTable table = {
 .sdbType = SDB_STREAM,

@@ -133,8 +133,11 @@ int32_t mndInitStream(SMnode *pMnode) {
 mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_STREAM_TASKS, mndCancelGetNextStreamTask);
 
 taosThreadMutexInit(&execInfo.lock, NULL);
-execInfo.pTaskMap = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), true, HASH_NO_LOCK);
+_hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR);
+
 execInfo.pTaskList = taosArrayInit(4, sizeof(STaskId));
+execInfo.pTaskMap = taosHashInit(64, fn, true, HASH_NO_LOCK);
+execInfo.transMgmt.pDBTrans = taosHashInit(32, fn, true, HASH_NO_LOCK);
 
 return sdbSetTable(pMnode->pSdb, table);
 }

@@ -142,6 +145,7 @@ int32_t mndInitStream(SMnode *pMnode) {
 void mndCleanupStream(SMnode *pMnode) {
 taosArrayDestroy(execInfo.pTaskList);
 taosHashCleanup(execInfo.pTaskMap);
+taosHashCleanup(execInfo.transMgmt.pDBTrans);
 taosThreadMutexDestroy(&execInfo.lock);
 mDebug("mnd stream exec info cleanup");
 }

@@ -335,7 +339,7 @@ static int32_t mndStreamGetPlanString(const char *ast, int8_t triggerType, int64
 .pAstRoot = pAst,
 .topicQuery = false,
 .streamQuery = true,
-.triggerType = triggerType == STREAM_TRIGGER_MAX_DELAY ? STREAM_TRIGGER_WINDOW_CLOSE : triggerType,
+.triggerType = (triggerType == STREAM_TRIGGER_MAX_DELAY) ? STREAM_TRIGGER_WINDOW_CLOSE : triggerType,
 .watermark = watermark,
 };
 code = qCreateQueryPlan(&cxt, &pPlan, NULL);

@@ -720,6 +724,34 @@ int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream)
 return 0;
 }
 
+static int32_t checkForNumOfStreams(SMnode *pMnode, SStreamObj *pStreamObj) { // check for number of existed tasks
+int32_t numOfStream = 0;
+SStreamObj *pStream = NULL;
+void *pIter = NULL;
+
+while ((pIter = sdbFetch(pMnode->pSdb, SDB_STREAM, pIter, (void **)&pStream)) != NULL) {
+if (pStream->sourceDbUid == pStreamObj->sourceDbUid) {
+++numOfStream;
+}
+
+sdbRelease(pMnode->pSdb, pStream);
+
+if (numOfStream > MND_STREAM_MAX_NUM) {
+mError("too many streams, no more than %d for each database", MND_STREAM_MAX_NUM);
+sdbCancelFetch(pMnode->pSdb, pIter);
+return TSDB_CODE_MND_TOO_MANY_STREAMS;
+}
+
+if (pStream->targetStbUid == pStreamObj->targetStbUid) {
+mError("Cannot write the same stable as other stream:%s", pStream->name);
+sdbCancelFetch(pMnode->pSdb, pIter);
+return TSDB_CODE_MND_INVALID_TARGET_TABLE;
+}
+}
+
+return TSDB_CODE_SUCCESS;
+}
+
 static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
 SMnode * pMnode = pReq->info.node;
 int32_t code = -1;
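Note: the extracted `checkForNumOfStreams` keeps the sdb cursor contract: every fetched object is released, and an early return first cancels the still-open cursor via `sdbCancelFetch`. A self-contained model of that early-exit contract with stub storage (illustrative, not the sdb API):

```c
#include <stdio.h>

#define NOBJ 5
static int objs[NOBJ] = {1, 2, 3, 99, 5};

/* returns the next cursor position, or -1 when iteration is exhausted */
static int fetch(int iter, int *out) {
  if (iter >= NOBJ) return -1;
  *out = objs[iter];
  return iter + 1;
}

int scan(void) {
  int iter = 0, obj = 0;
  while ((iter = fetch(iter, &obj)) != -1) {
    if (obj == 99) {                              /* early-exit condition */
      printf("stop at %d, cancel cursor\n", obj); /* models sdbCancelFetch */
      return -1;
    }
  }
  return 0; /* normal end: the cursor closed itself */
}

int main(void) { return (scan() == -1) ? 0 : 1; }
```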
@@ -732,6 +764,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
 terrno = TSDB_CODE_INVALID_MSG;
 goto _OVER;
 }
+
 #ifdef WINDOWS
 terrno = TSDB_CODE_MND_INVALID_PLATFORM;
 goto _OVER;

@@ -772,42 +805,9 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
 goto _OVER;
 }
 
-{
-int32_t numOfStream = 0;
-
-SStreamObj *pStream = NULL;
-void * pIter = NULL;
-
-while (1) {
-pIter = sdbFetch(pMnode->pSdb, SDB_STREAM, pIter, (void **)&pStream);
-if (pIter == NULL) {
-if (numOfStream > MND_STREAM_MAX_NUM) {
-mError("too many streams, no more than %d for each database", MND_STREAM_MAX_NUM);
-terrno = TSDB_CODE_MND_TOO_MANY_STREAMS;
-goto _OVER;
-}
-break;
-}
-
-if (pStream->sourceDbUid == streamObj.sourceDbUid) {
-++numOfStream;
-}
-
-sdbRelease(pMnode->pSdb, pStream);
-if (numOfStream > MND_STREAM_MAX_NUM) {
-mError("too many streams, no more than %d for each database", MND_STREAM_MAX_NUM);
-terrno = TSDB_CODE_MND_TOO_MANY_STREAMS;
-sdbCancelFetch(pMnode->pSdb, pIter);
-goto _OVER;
-}
-
-if (pStream->targetStbUid == streamObj.targetStbUid) {
-mError("Cannot write the same stable as other stream:%s", pStream->name);
-terrno = TSDB_CODE_MND_INVALID_TARGET_TABLE;
-sdbCancelFetch(pMnode->pSdb, pIter);
-goto _OVER;
-}
-}
+code = checkForNumOfStreams(pMnode, &streamObj);
+if (code != TSDB_CODE_SUCCESS) {
+goto _OVER;
+}
 
 STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-stream");

@@ -866,7 +866,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
 
 taosThreadMutexLock(&execInfo.lock);
 mDebug("stream tasks register into node list");
-keepStreamTasksInBuf(&streamObj, &execInfo);
+saveStreamTasksInfo(&streamObj, &execInfo);
 taosThreadMutexUnlock(&execInfo.lock);
 
 code = TSDB_CODE_ACTION_IN_PROGRESS;

@@ -893,7 +893,6 @@ _OVER:
 }
 
 mndReleaseStream(pMnode, pStream);
-
 tFreeSCMCreateStreamReq(&createStreamReq);
 tFreeStreamObj(&streamObj);
 if(sql != NULL){

@@ -1268,7 +1267,7 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
 SMStreamDoCheckpointMsg *pMsg = (SMStreamDoCheckpointMsg *)pReq->pCont;
 int64_t checkpointId = pMsg->checkpointId;
 
-STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, NULL, MND_STREAM_CHECKPOINT_NAME);
+STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, NULL, MND_STREAM_CHECKPOINT_NAME);
 if (pTrans == NULL) {
 mError("failed to trigger checkpoint, reason: %s", tstrerror(TSDB_CODE_OUT_OF_MEMORY));
 return -1;

@@ -1277,7 +1276,8 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
 mDebug("start to trigger checkpoint, checkpointId: %" PRId64, checkpointId);
 
 const char *pDb = mndGetStreamDB(pMnode);
-mndTransSetDbName(pTrans, pDb, "checkpoint");
+mndTransSetDbName(pTrans, pDb, pDb);
+mndStreamRegisterTrans(pTrans, MND_STREAM_CHECKPOINT_NAME, pDb, pDb);
 taosMemoryFree((void *)pDb);
 
 if (mndTransCheckConflict(pMnode, pTrans) != 0) {

@@ -1329,46 +1329,56 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
 if (dropReq.igNotExists) {
 mInfo("stream:%s not exist, ignore not exist is set", dropReq.name);
 sdbRelease(pMnode->pSdb, pStream);
-tFreeSMDropStreamReq(&dropReq);
+tFreeMDropStreamReq(&dropReq);
 return 0;
 } else {
 terrno = TSDB_CODE_MND_STREAM_NOT_EXIST;
 mError("stream:%s not exist failed to drop", dropReq.name);
-tFreeSMDropStreamReq(&dropReq);
+tFreeMDropStreamReq(&dropReq);
 return -1;
 }
 }
 
 if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) {
 sdbRelease(pMnode->pSdb, pStream);
-tFreeSMDropStreamReq(&dropReq);
+tFreeMDropStreamReq(&dropReq);
 return -1;
 }
 
-STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "drop-stream");
+// check if it is conflict with other trans in both sourceDb and targetDb.
+bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb);
+if (conflict) {
+sdbRelease(pMnode->pSdb, pStream);
+tFreeMDropStreamReq(&dropReq);
+return -1;
+}
+
+STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, MND_STREAM_DROP_NAME);
 if (pTrans == NULL) {
 mError("stream:%s, failed to drop since %s", dropReq.name, terrstr());
 sdbRelease(pMnode->pSdb, pStream);
-tFreeSMDropStreamReq(&dropReq);
+tFreeMDropStreamReq(&dropReq);
 return -1;
 }
 
-mInfo("trans:%d, used to drop stream:%s", pTrans->id, dropReq.name);
+mInfo("trans:%d used to drop stream:%s", pTrans->id, dropReq.name);
 
 mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
 if (mndTransCheckConflict(pMnode, pTrans) != 0) {
 sdbRelease(pMnode->pSdb, pStream);
 mndTransDrop(pTrans);
-tFreeSMDropStreamReq(&dropReq);
+tFreeMDropStreamReq(&dropReq);
 return -1;
 }
 
+int32_t code = mndStreamRegisterTrans(pTrans, MND_STREAM_DROP_NAME, pStream->sourceDb, pStream->targetDb);
+
 // drop all tasks
 if (mndDropStreamTasks(pMnode, pTrans, pStream) < 0) {
 mError("stream:%s, failed to drop task since %s", dropReq.name, terrstr());
 sdbRelease(pMnode->pSdb, pStream);
 mndTransDrop(pTrans);
-tFreeSMDropStreamReq(&dropReq);
+tFreeMDropStreamReq(&dropReq);
 return -1;
 }
 
@@ -1376,7 +1386,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
 if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) {
 sdbRelease(pMnode->pSdb, pStream);
 mndTransDrop(pTrans);
-tFreeSMDropStreamReq(&dropReq);
+tFreeMDropStreamReq(&dropReq);
 return -1;
 }
 

@@ -1384,7 +1394,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
 mError("trans:%d, failed to prepare drop stream trans since %s", pTrans->id, terrstr());
 sdbRelease(pMnode->pSdb, pStream);
 mndTransDrop(pTrans);
-tFreeSMDropStreamReq(&dropReq);
+tFreeMDropStreamReq(&dropReq);
 return -1;
 }
 

@@ -1392,13 +1402,12 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
 
 SName name = {0};
 tNameFromString(&name, dropReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
-// reuse this function for stream
 
 auditRecord(pReq, pMnode->clusterId, "dropStream", "", name.dbname, dropReq.sql, dropReq.sqlLen);
 
 sdbRelease(pMnode->pSdb, pStream);
 mndTransDrop(pTrans);
-tFreeSMDropStreamReq(&dropReq);
+tFreeMDropStreamReq(&dropReq);
 
 return TSDB_CODE_ACTION_IN_PROGRESS;
 }

@@ -1814,6 +1823,13 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
 return -1;
 }
 
+// check if it is conflict with other trans in both sourceDb and targetDb.
+bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb);
+if (conflict) {
+sdbRelease(pMnode->pSdb, pStream);
+return -1;
+}
+
 bool updated = taskNodeIsUpdated(pMnode);
 if (updated) {
 mError("tasks are not ready for pause, node update detected");

@@ -1822,7 +1838,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
 
 STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "pause-stream");
 if (pTrans == NULL) {
-mError("stream:%s, failed to pause stream since %s", pauseReq.name, terrstr());
+mError("stream:%s failed to pause stream since %s", pauseReq.name, terrstr());
 sdbRelease(pMnode->pSdb, pStream);
 return -1;
 }

@@ -1836,7 +1852,9 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
 return -1;
 }
 
-// pause all tasks
+int32_t code = mndStreamRegisterTrans(pTrans, MND_STREAM_PAUSE_NAME, pStream->sourceDb, pStream->targetDb);
+
+// if nodeUpdate happened, not send pause trans
 if (mndPauseAllStreamTasks(pMnode, pTrans, pStream) < 0) {
 mError("stream:%s, failed to pause task since %s", pauseReq.name, terrstr());
 sdbRelease(pMnode->pSdb, pStream);

@@ -1940,13 +1958,21 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
 return -1;
 }
 
-STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "pause-stream");
-if (pTrans == NULL) {
-mError("stream:%s, failed to pause stream since %s", pauseReq.name, terrstr());
+// check if it is conflict with other trans in both sourceDb and targetDb.
+bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb);
+if (conflict) {
 sdbRelease(pMnode->pSdb, pStream);
 return -1;
 }
-mInfo("trans:%d, used to pause stream:%s", pTrans->id, pauseReq.name);
+
+STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, MND_STREAM_RESUME_NAME);
+if (pTrans == NULL) {
+mError("stream:%s, failed to resume stream since %s", pauseReq.name, terrstr());
+sdbRelease(pMnode->pSdb, pStream);
+return -1;
+}
+
+mInfo("trans:%d used to resume stream:%s", pTrans->id, pauseReq.name);
 
 mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
 if (mndTransCheckConflict(pMnode, pTrans) != 0) {

@@ -1955,6 +1981,8 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
 return -1;
 }
 
+int32_t code = mndStreamRegisterTrans(pTrans, MND_STREAM_RESUME_NAME, pStream->sourceDb, pStream->targetDb);
+
 // resume all tasks
 if (mndResumeAllStreamTasks(pTrans, pMnode, pStream, pauseReq.igUntreated) < 0) {
 mError("stream:%s, failed to drop task since %s", pauseReq.name, terrstr());

@@ -2240,7 +2268,7 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange
 
 // here create only one trans
 if (pTrans == NULL) {
-pTrans = doCreateTrans(pMnode, pStream, "stream-task-update");
+pTrans = doCreateTrans(pMnode, pStream, NULL, MND_STREAM_TASK_UPDATE_NAME, "update task epsets");
 if (pTrans == NULL) {
 sdbRelease(pSdb, pStream);
 sdbCancelFetch(pSdb, pIter);

@@ -2350,7 +2378,7 @@ static void doExtractTasksFromStream(SMnode *pMnode) {
 break;
 }
 
-keepStreamTasksInBuf(pStream, &execInfo);
+saveStreamTasksInfo(pStream, &execInfo);
 sdbRelease(pSdb, pStream);
 }
 }
@@ -2436,6 +2464,17 @@ int32_t removeExpirednodeEntryAndTask(SArray *pNodeSnapshot) {
 return 0;
 }
 
+static void killAllCheckpointTrans(SMnode* pMnode, SVgroupChangeInfo* pChangeInfo) {
+void* pIter = NULL;
+while((pIter = taosHashIterate(pChangeInfo->pDBMap, pIter)) != NULL) {
+char* pDb = (char*) pIter;
+
+size_t len = 0;
+void* pKey = taosHashGetKey(pDb, &len);
+killActiveCheckpointTrans(pMnode, pKey, len);
+}
+}
+
 // this function runs by only one thread, so it is not multi-thread safe
 static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
 int32_t code = 0;

@@ -2477,7 +2516,8 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
 SVgroupChangeInfo changeInfo = mndFindChangedNodeInfo(pMnode, execInfo.pNodeList, pNodeSnapshot);
 if (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0) {
 // kill current active checkpoint transaction, since the transaction is vnode wide.
-doKillActiveCheckpointTrans(pMnode);
+killAllCheckpointTrans(pMnode, &changeInfo);
+
 code = mndProcessVgroupChange(pMnode, &changeInfo);
 
 // keep the new vnode snapshot

@@ -2523,7 +2563,7 @@ static int32_t mndProcessNodeCheck(SRpcMsg *pReq) {
 return 0;
 }
 
-void keepStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode) {
+void saveStreamTasksInfo(SStreamObj *pStream, SStreamExecInfo *pExecNode) {
 int32_t level = taosArrayGetSize(pStream->tasks);
 
 for (int32_t i = 0; i < level; i++) {

@@ -2566,8 +2606,9 @@ void removeStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode) {
 STaskId *pId = taosArrayGet(pExecNode->pTaskList, k);
 if (pId->taskId == id.taskId && pId->streamId == id.streamId) {
 taosArrayRemove(pExecNode->pTaskList, k);
-mInfo("s-task:0x%x removed from buffer, remain:%d", (int32_t)id.taskId,
-(int32_t)taosArrayGetSize(pExecNode->pTaskList));
+
+int32_t num = taosArrayGetSize(pExecNode->pTaskList);
+mInfo("s-task:0x%x removed from buffer, remain:%d", (int32_t)id.taskId, num);
 break;
 }
 }

@@ -2578,15 +2619,15 @@ void removeStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode) {
 ASSERT(taosHashGetSize(pExecNode->pTaskMap) == taosArrayGetSize(pExecNode->pTaskList));
 }
 
-STrans *doCreateTrans(SMnode *pMnode, SStreamObj *pStream, const char *name) {
-STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, NULL, name);
+STrans *doCreateTrans(SMnode *pMnode, SStreamObj *pStream, SRpcMsg *pReq, const char *name, const char* pMsg) {
+STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, name);
 if (pTrans == NULL) {
 mError("failed to build trans:%s, reason: %s", name, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
 terrno = TSDB_CODE_OUT_OF_MEMORY;
 return NULL;
 }
 
-mDebug("start to build stream:0x%" PRIx64 " task DAG update", pStream->uid);
+mDebug("s-task:0x%"PRIx64" start to build trans %s", pStream->uid, pMsg);
 
 mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
 if (mndTransCheckConflict(pMnode, pTrans) != 0) {

@@ -2601,7 +2642,7 @@ STrans *doCreateTrans(SMnode *pMnode, SStreamObj *pStream, const char *name) {
 }
 
 int32_t createStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) {
-STrans *pTrans = doCreateTrans(pMnode, pStream, "stream-task-reset");
+STrans *pTrans = doCreateTrans(pMnode, pStream, NULL, MND_STREAM_TASK_RESET_NAME, " reset from failed checkpoint");
 if (pTrans == NULL) {
 return terrno;
 }

@@ -2665,43 +2706,36 @@ int32_t createStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) {
 return TSDB_CODE_ACTION_IN_PROGRESS;
 }
 
-int32_t doKillActiveCheckpointTrans(SMnode *pMnode) {
-int32_t transId = 0;
-SSdb *pSdb = pMnode->pSdb;
-STrans *pTrans = NULL;
-void *pIter = NULL;
-
-while (1) {
-pIter = sdbFetch(pSdb, SDB_TRANS, pIter, (void **)&pTrans);
-if (pIter == NULL) {
-break;
-}
-
-if (strncmp(pTrans->opername, MND_STREAM_CHECKPOINT_NAME, tListLen(pTrans->opername) - 1) == 0) {
-transId = pTrans->id;
-sdbRelease(pSdb, pTrans);
-sdbCancelFetch(pSdb, pIter);
-break;
-}
-
-sdbRelease(pSdb, pTrans);
-}
-
-if (transId == 0) {
-mDebug("failed to find the checkpoint trans, reset not executed");
+int32_t killActiveCheckpointTrans(SMnode *pMnode, const char* pDBName, size_t len) {
+// data in the hash table will be removed automatically, no need to remove it here.
+SStreamTransInfo* pTransInfo = taosHashGet(execInfo.transMgmt.pDBTrans, pDBName, len);
+if (pTransInfo == NULL) {
 return TSDB_CODE_SUCCESS;
 }
 
-pTrans = mndAcquireTrans(pMnode, transId);
-mInfo("kill checkpoint trans:%d", transId);
+// not checkpoint trans, ignore
+if (strcmp(pTransInfo->name, MND_STREAM_CHECKPOINT_NAME) != 0) {
+mDebug("not checkpoint trans, not kill it, name:%s, transId:%d", pTransInfo->name, pTransInfo->transId);
+return TSDB_CODE_SUCCESS;
+}
+
+STrans* pTrans = mndAcquireTrans(pMnode, pTransInfo->transId);
+if (pTrans != NULL) {
+mInfo("kill checkpoint transId:%d in Db:%s", pTransInfo->transId, pDBName);
+mndKillTrans(pMnode, pTrans);
+mndReleaseTrans(pMnode, pTrans);
+}
 
-mndKillTrans(pMnode, pTrans);
-mndReleaseTrans(pMnode, pTrans);
 return TSDB_CODE_SUCCESS;
 }
 
-int32_t mndResetFromCheckpoint(SMnode *pMnode) {
-doKillActiveCheckpointTrans(pMnode);
+int32_t mndResetStatusFromCheckpoint(SMnode *pMnode, int32_t transId) {
+STrans* pTrans = mndAcquireTrans(pMnode, transId);
+if (pTrans != NULL) {
+mInfo("kill checkpoint transId:%d to reset task status", transId);
+mndKillTrans(pMnode, pTrans);
+mndReleaseTrans(pMnode, pTrans);
+}
 
 // set all tasks status to be normal, refactor later to be stream level, instead of vnode level.
 SSdb *pSdb = pMnode->pSdb;

@@ -2713,7 +2747,13 @@ int32_t mndResetFromCheckpoint(SMnode *pMnode) {
 break;
 }
 
 // todo this transaction should exist be only one
+bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb);
+if (conflict) {
+mError("stream:%s other trans exists in DB:%s & %s failed to start reset-status trans",
+pStream->name, pStream->sourceDb, pStream->targetDb);
+continue;
+}
+
 mDebug("stream:%s (0x%" PRIx64 ") reset checkpoint procedure, create reset trans", pStream->name, pStream->uid);
 int32_t code = createStreamResetStatusTrans(pMnode, pStream);
 if (code != TSDB_CODE_SUCCESS) {

@@ -2842,7 +2882,7 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
 // if the execInfo.activeCheckpoint == 0, the checkpoint is restoring from wal
 mInfo("checkpointId:%" PRId64 " failed, issue task-reset trans to reset all tasks status",
 execInfo.activeCheckpoint);
-mndResetFromCheckpoint(pMnode);
+mndResetStatusFromCheckpoint(pMnode, activeCheckpointId);
 } else {
 mInfo("not all vgroups are ready, wait for next HB from stream tasks");
 }
@@ -0,0 +1,105 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "mndTrans.h"
#include "mndStream.h"

typedef struct SKeyInfo {
  void*   pKey;
  int32_t keyLen;
} SKeyInfo;

static int32_t clearFinishedTrans(SMnode* pMnode);

int32_t mndStreamRegisterTrans(STrans* pTrans, const char* pName, const char* pSrcDb, const char* pDstDb) {
  SStreamTransInfo info = {.transId = pTrans->id, .startTime = taosGetTimestampMs(), .name = pName};
  taosHashPut(execInfo.transMgmt.pDBTrans, pSrcDb, strlen(pSrcDb), &info, sizeof(SStreamTransInfo));

  if (strcmp(pSrcDb, pDstDb) != 0) {
    taosHashPut(execInfo.transMgmt.pDBTrans, pDstDb, strlen(pDstDb), &info, sizeof(SStreamTransInfo));
  }

  return 0;
}

int32_t clearFinishedTrans(SMnode* pMnode) {
  SArray* pList = taosArrayInit(4, sizeof(SKeyInfo));
  size_t  keyLen = 0;

  taosThreadMutexLock(&execInfo.lock);

  void* pIter = NULL;
  while ((pIter = taosHashIterate(execInfo.transMgmt.pDBTrans, pIter)) != NULL) {
    SStreamTransInfo* pEntry = (SStreamTransInfo*)pIter;
    STrans*           pTrans = mndAcquireTrans(pMnode, pEntry->transId);

    // let's clear the finished trans
    if (pTrans == NULL) {
      void* pKey = taosHashGetKey(pEntry, &keyLen);
      // the key is the src/dst db name
      SKeyInfo info = {.pKey = pKey, .keyLen = keyLen};

      mDebug("transId:%d %s startTs:%" PRId64 " cleared since finished", pEntry->transId, pEntry->name,
             pEntry->startTime);
      taosArrayPush(pList, &info);
    } else {
      mndReleaseTrans(pMnode, pTrans);
    }
  }

  size_t num = taosArrayGetSize(pList);
  for (int32_t i = 0; i < num; ++i) {
    SKeyInfo* pKey = taosArrayGet(pList, i);
    taosHashRemove(execInfo.transMgmt.pDBTrans, pKey->pKey, pKey->keyLen);
  }

  mDebug("clear %d finished stream-trans, remained:%d", (int32_t)num, taosHashGetSize(execInfo.transMgmt.pDBTrans));
  taosThreadMutexUnlock(&execInfo.lock);

  terrno = TSDB_CODE_SUCCESS;
  taosArrayDestroy(pList);
  return 0;
}

bool streamTransConflictOtherTrans(SMnode* pMnode, const char* pSrcDb, const char* pDstDb) {
  clearFinishedTrans(pMnode);

  taosThreadMutexLock(&execInfo.lock);
  int32_t num = taosHashGetSize(execInfo.transMgmt.pDBTrans);
  if (num <= 0) {
    taosThreadMutexUnlock(&execInfo.lock);
    return false;
  }

  SStreamTransInfo* pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, pSrcDb, strlen(pSrcDb));
  if (pEntry != NULL) {
    taosThreadMutexUnlock(&execInfo.lock);
    mWarn("conflict with other transId:%d in Db:%s, trans:%s", pEntry->transId, pSrcDb, pEntry->name);
    return true;
  }

  pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, pDstDb, strlen(pDstDb));
  if (pEntry != NULL) {
    taosThreadMutexUnlock(&execInfo.lock);
    mWarn("conflict with other transId:%d in Db:%s, trans:%s", pEntry->transId, pDstDb, pEntry->name);
    return true;
  }

  taosThreadMutexUnlock(&execInfo.lock);
  return false;
}

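The new file implements a small register/check/sweep protocol: a stream trans is registered under both its source and target DB keys, a later caller asks for conflicts before starting its own trans, and finished entries (those whose trans can no longer be acquired) are swept lazily on each conflict check. A self-contained toy of the same protocol, with illustrative names and a plain array in place of the hash table:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy model of the per-DB trans registry introduced by mndStreamTrans.c. */
typedef struct {
  char db[32];
  int  transId;
  bool finished;  /* stands in for mndAcquireTrans() returning NULL */
} Entry;

static Entry tbl[8];
static int   n = 0;

static void registerTrans(const char* src, const char* dst, int transId) {
  snprintf(tbl[n].db, sizeof(tbl[n].db), "%s", src);
  tbl[n++].transId = transId;
  if (strcmp(src, dst) != 0) {  /* register under both db keys */
    snprintf(tbl[n].db, sizeof(tbl[n].db), "%s", dst);
    tbl[n++].transId = transId;
  }
}

static void sweepFinished(void) {  /* clearFinishedTrans() analogue */
  int w = 0;
  for (int r = 0; r < n; ++r) {
    if (!tbl[r].finished) tbl[w++] = tbl[r];
  }
  n = w;
}

static bool conflict(const char* src, const char* dst) {
  sweepFinished();  /* drop stale entries first, as the patch does */
  for (int i = 0; i < n; ++i) {
    if (strcmp(tbl[i].db, src) == 0 || strcmp(tbl[i].db, dst) == 0) {
      printf("conflict with transId:%d in db:%s\n", tbl[i].transId, tbl[i].db);
      return true;
    }
  }
  return false;
}

int main(void) {
  registerTrans("srcDb", "dstDb", 11);
  printf("%d\n", conflict("srcDb", "otherDb"));  /* 1: srcDb is busy   */
  tbl[0].finished = tbl[1].finished = true;      /* the trans finished */
  printf("%d\n", conflict("srcDb", "dstDb"));    /* 0: swept, free     */
  return 0;
}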
@@ -82,7 +82,7 @@ void tqDestroyTqHandle(void* data) {
    taosMemoryFree(pData->msg);
    pData->msg = NULL;
  }
  if (pData->block != NULL){
  if (pData->block != NULL) {
    blockDataDestroy(pData->block);
  }
}

@@ -585,9 +585,9 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
    taosWLockLatch(&pTq->lock);
    bool exec = tqIsHandleExec(pHandle);

    if(exec){
    if (exec) {
      tqInfo("vgId:%d, topic:%s, subscription is executing, delete wait for 10ms and retry, pHandle:%p", vgId,
             pHandle->subKey, pHandle);
      taosWUnLockLatch(&pTq->lock);
      taosMsleep(10);
      continue;

@@ -704,12 +704,12 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
    ret = tqMetaSaveHandle(pTq, req.subKey, &handle);
    taosWUnLockLatch(&pTq->lock);
  } else {
    while(1){
    while (1) {
      taosWLockLatch(&pTq->lock);
      bool exec = tqIsHandleExec(pHandle);
      if(exec){
        tqInfo("vgId:%d, topic:%s, subscription is executing, sub wait for 10ms and retry, pHandle:%p", pTq->pVnode->config.vgId,
               pHandle->subKey, pHandle);
      if (exec) {
        tqInfo("vgId:%d, topic:%s, subscription is executing, sub wait for 10ms and retry, pHandle:%p",
               pTq->pVnode->config.vgId, pHandle->subKey, pHandle);
        taosWUnLockLatch(&pTq->lock);
        taosMsleep(10);
        continue;

@@ -718,7 +718,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
      tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains, because redo wal log", req.vgId, req.newConsumerId);
    } else {
      tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId,
             req.newConsumerId);
      atomic_store_64(&pHandle->consumerId, req.newConsumerId);
      atomic_store_32(&pHandle->epoch, 0);
      tqUnregisterPushHandle(pTq, pHandle);

@@ -850,11 +850,11 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
    pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond, pTask->id.taskId);
  }

  // // reset the task status from unfinished transaction
  // if (pTask->status.taskStatus == TASK_STATUS__PAUSE) {
  //   tqWarn("s-task:%s reset task status to be normal, status kept in taskMeta: Paused", pTask->id.idStr);
  //   pTask->status.taskStatus = TASK_STATUS__READY;
  // }

  streamTaskResetUpstreamStageInfo(pTask);
  streamSetupScheduleTrigger(pTask);

@@ -906,7 +906,7 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask
  int64_t nextProcessedVer = pStreamTask->hTaskInfo.haltVer;

  // if it's a source task, extract the last version in wal.
  SVersionRange *pRange = &pTask->dataRange.range;
  SVersionRange* pRange = &pTask->dataRange.range;

  bool done = streamHistoryTaskSetVerRangeStep2(pTask, nextProcessedVer);
  pTask->execInfo.step2Start = taosGetTimestampMs();

@@ -918,7 +918,7 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask
  } else {
    STimeWindow* pWindow = &pTask->dataRange.window;
    tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64
            ", do secondary scan-history from WAL after halt the related stream task:%s",
            id, pTask->info.taskLevel, pRange->minVer, pRange->maxVer, pWindow->skey, pWindow->ekey,
            pStreamTask->id.idStr);
    ASSERT(pTask->status.schedStatus == TASK_SCHED_STATUS__WAITING);

@@ -932,7 +932,7 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask
  tqDebug("s-task:%s wal reader start scan WAL verRange:%" PRId64 "-%" PRId64 ", set sched-status:%d", id, dstVer,
          pTask->dataRange.range.maxVer, TASK_SCHED_STATUS__INACTIVE);

  /*int8_t status = */streamTaskSetSchedStatusInactive(pTask);
  /*int8_t status = */ streamTaskSetSchedStatusInactive(pTask);

  // now the fill-history task starts to scan data from wal files.
  int32_t code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_SCANHIST_DONE);

@@ -961,7 +961,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
  streamTaskGetStatus(pTask, &pStatus);

  // avoid multi-thread exec
  while(1) {
  while (1) {
    int32_t sentinel = atomic_val_compare_exchange_32(&pTask->status.inScanHistorySentinel, 0, 1);
    if (sentinel != 0) {
      tqDebug("s-task:%s already in scan-history func, wait for 100ms, and try again", id);

@@ -973,16 +973,10 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {

  // let's decide which step should be executed now
  if (pTask->execInfo.step1Start == 0) {
    ASSERT(pTask->status.pauseAllowed == false);
    int64_t ts = taosGetTimestampMs();

    pTask->execInfo.step1Start = ts;
    tqDebug("s-task:%s start scan-history stage(step 1), status:%s, step1 startTs:%" PRId64, id, pStatus, ts);

    // NOTE: in case of stream task, scan-history data in wal is not allowed to pause
    if (pTask->info.fillHistory == 1) {
      streamTaskEnablePause(pTask);
    }
  } else {
    if (pTask->execInfo.step2Start == 0) {
      tqDebug("s-task:%s continue exec scan-history(step1), original step1 startTs:%" PRId64 ", already elapsed:%.2fs",

@@ -1008,7 +1002,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
    return 0;
  }

  int64_t st = taosGetTimestampMs();
  SScanhistoryDataInfo retInfo = streamScanHistoryData(pTask, st);

  double el = (taosGetTimestampMs() - st) / 1000.0;

@@ -1041,13 +1035,13 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
    tqDebug("s-task:%s scan-history(step 1) ended, elapsed time:%.2fs", id, pTask->execInfo.step1El);

    if (pTask->info.fillHistory) {
      SStreamTask* pStreamTask = NULL;

      // 1. get the related stream task
      pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
      if (pStreamTask == NULL) {
        tqError("failed to find s-task:0x%"PRIx64", it may have been destroyed, drop related fill-history task:%s",
                pTask->streamTaskId.taskId, pTask->id.idStr);
        tqError("failed to find s-task:0x%" PRIx64 ", it may have been destroyed, drop related fill-history task:%s",
                pTask->streamTaskId.taskId, pTask->id.idStr);

        tqDebug("s-task:%s fill-history task set status to be dropping", id);
        streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);

@@ -1171,7 +1165,6 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion,
  int32_t level = pTask->info.taskLevel;
  if (level == TASK_LEVEL__SINK) {
    if (status == TASK_STATUS__UNINIT) {

    }
    streamMetaReleaseTask(pTq->pStreamMeta, pTask);
    return 0;

@@ -1213,12 +1206,12 @@ int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
  SVResumeStreamTaskReq* pReq = (SVResumeStreamTaskReq*)msg;

  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->streamId, pReq->taskId);
  int32_t      code = tqProcessTaskResumeImpl(pTq, pTask, sversion, pReq->igUntreated);
  if (code != 0) {
    return code;
  }

  STaskId*     pHTaskId = &pTask->hTaskInfo.id;
  SStreamTask* pHistoryTask = streamMetaAcquireTask(pTq->pStreamMeta, pHTaskId->streamId, pHTaskId->taskId);
  if (pHistoryTask) {
    code = tqProcessTaskResumeImpl(pTq, pHistoryTask, sversion, pReq->igUntreated);

@@ -1236,89 +1229,6 @@ int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) {
  return 0;
}

// todo refactor.
int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
  STQ*    pTq = pVnode->pTq;
  int32_t vgId = pVnode->config.vgId;

  SMsgHead* msgStr = pMsg->pCont;
  char*     msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
  int32_t   msgLen = pMsg->contLen - sizeof(SMsgHead);
  int32_t   code = 0;

  SStreamDispatchReq req;
  SDecoder           decoder;
  tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
  if (tDecodeStreamDispatchReq(&decoder, &req) < 0) {
    code = TSDB_CODE_MSG_DECODE_ERROR;
    tDecoderClear(&decoder);
    goto FAIL;
  }
  tDecoderClear(&decoder);

  int32_t taskId = req.taskId;
  tqDebug("vgId:%d receive dispatch msg to s-task:0x%" PRIx64 "-0x%x", vgId, req.streamId, taskId);

  // for test purpose
  // if (req.type == STREAM_INPUT__CHECKPOINT_TRIGGER) {
  //   code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
  //   goto FAIL;
  // }

  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, taskId);
  if (pTask != NULL) {
    SRpcMsg rsp = {.info = pMsg->info, .code = 0};
    streamProcessDispatchMsg(pTask, &req, &rsp);
    tDeleteStreamDispatchReq(&req);
    streamMetaReleaseTask(pTq->pStreamMeta, pTask);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
    return 0;
  } else {
    tDeleteStreamDispatchReq(&req);
  }

  code = TSDB_CODE_STREAM_TASK_NOT_EXIST;

FAIL:
  if (pMsg->info.handle == NULL) {
    tqError("s-task:0x%x vgId:%d msg handle is null, abort enqueue dispatch msg", vgId, taskId);
    return -1;
  }

  SMsgHead* pRspHead = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp));
  if (pRspHead == NULL) {
    SRpcMsg rsp = {.code = TSDB_CODE_OUT_OF_MEMORY, .info = pMsg->info};
    tqError("s-task:0x%x send dispatch error rsp, code:%s", taskId, tstrerror(code));
    tmsgSendRsp(&rsp);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
    return -1;
  }

  pRspHead->vgId = htonl(req.upstreamNodeId);
  ASSERT(pRspHead->vgId != 0);

  SStreamDispatchRsp* pRsp = POINTER_SHIFT(pRspHead, sizeof(SMsgHead));
  pRsp->streamId = htobe64(req.streamId);
  pRsp->upstreamTaskId = htonl(req.upstreamTaskId);
  pRsp->upstreamNodeId = htonl(req.upstreamNodeId);
  pRsp->downstreamNodeId = htonl(pVnode->config.vgId);
  pRsp->downstreamTaskId = htonl(req.taskId);
  pRsp->msgId = htonl(req.msgId);
  pRsp->stage = htobe64(req.stage);
  pRsp->inputStatus = TASK_OUTPUT_STATUS__NORMAL;

  int32_t len = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp);
  SRpcMsg rsp = {.code = code, .info = pMsg->info, .contLen = len, .pCont = pRspHead};
  tqError("s-task:0x%x send dispatch error rsp, code:%s", taskId, tstrerror(code));

  tmsgSendRsp(&rsp);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
  return -1;
}

int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) {
  int32_t      vgId = TD_VID(pTq->pVnode);
  SStreamMeta* pMeta = pTq->pStreamMeta;

@@ -1334,7 +1244,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
    tqDebug("vgId:%d not leader, ignore checkpoint-source msg, s-task:0x%x", vgId, req.taskId);
    SRpcMsg rsp = {0};
    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
    tmsgSendRsp(&rsp);  // error occurs
    return TSDB_CODE_SUCCESS;
  }

@@ -1342,7 +1252,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
    tqDebug("vgId:%d checkpoint-source msg received during restoring, s-task:0x%x ignore it", vgId, req.taskId);
    SRpcMsg rsp = {0};
    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
    tmsgSendRsp(&rsp);  // error occurs
    return TSDB_CODE_SUCCESS;
  }

@@ -1354,7 +1264,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
    tqError("vgId:%d failed to decode checkpoint-source msg, code:%s", vgId, tstrerror(code));
    SRpcMsg rsp = {0};
    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
    tmsgSendRsp(&rsp);  // error occurs
    return code;
  }
  tDecoderClear(&decoder);

@@ -1365,22 +1275,23 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
            req.taskId);
    SRpcMsg rsp = {0};
    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
    tmsgSendRsp(&rsp);  // error occurs
    return TSDB_CODE_SUCCESS;
  }

  // downstream not ready; currently the stream tasks are not all ready. Ignore this checkpoint req.
  if (pTask->status.downstreamReady != 1) {
    pTask->chkInfo.failedId = req.checkpointId;  // record the latest failed checkpoint id
    pTask->checkpointingId = req.checkpointId;

    qError("s-task:%s not ready for checkpoint, since downstream not ready, ignore this checkpoint:%" PRId64
           ", set it failure", pTask->id.idStr, req.checkpointId);
           ", set it failure",
           pTask->id.idStr, req.checkpointId);
    streamMetaReleaseTask(pMeta, pTask);

    SRpcMsg rsp = {0};
    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
    tmsgSendRsp(&rsp);  // error occurs
    return TSDB_CODE_SUCCESS;
  }

@@ -1390,14 +1301,14 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)

  if (status == TASK_STATUS__HALT || status == TASK_STATUS__PAUSE) {
    tqError("s-task:%s not ready for checkpoint, since it is halt, ignore this checkpoint:%" PRId64 ", set it failure",
            pTask->id.idStr, req.checkpointId);

    taosThreadMutexUnlock(&pTask->lock);
    streamMetaReleaseTask(pMeta, pTask);

    SRpcMsg rsp = {0};
    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
    tmsgSendRsp(&rsp);  // error occurs

    return TSDB_CODE_SUCCESS;
  }

@@ -1431,13 +1342,13 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
  streamMetaWUnLock(pMeta);

  qInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", total checkpoint reqs:%d",
        pTask->id.idStr, vgId, pTask->info.taskLevel, req.checkpointId, total);

  code = streamAddCheckpointSourceRspMsg(&req, &pMsg->info, pTask, 1);
  if (code != TSDB_CODE_SUCCESS) {
    SRpcMsg rsp = {0};
    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
    tmsgSendRsp(&rsp);  // error occurs
    return code;
  }

@@ -1455,13 +1366,13 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) {
}

int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) {
  SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*) pMsg->pCont;
  SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)pMsg->pCont;

  SStreamMeta* pMeta = pTq->pStreamMeta;
  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
  if (pTask == NULL) {
    tqError("vgId:%d process task-reset req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId,
            pReq->taskId);
    tqError("vgId:%d process task-reset req, failed to acquire task:0x%x, it may have been dropped already",
            pMeta->vgId, pReq->taskId);
    return TSDB_CODE_SUCCESS;
  }

@@ -1469,8 +1380,7 @@ int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) {

  // clear flag set during do checkpoint, and open inputQ for all upstream tasks
  if (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__CK) {
    streamTaskClearCheckInfo(pTask);
    taosArrayClear(pTask->pReadyMsgList);
    streamTaskClearCheckInfo(pTask, true);
    streamTaskSetStatusReady(pTask);
  }

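tqProcessTaskCheckPointSourceReq repeats the same three-step rejection (build a failure rsp, send it, return) in several places. A hedged sketch of how those sites could share one helper; every name below is hypothetical, only the shape of the rejection path mirrors the handler:

#include <stdio.h>

typedef struct { int code; } Rsp;

static void buildFailureRsp(Rsp* r) { r->code = 0; }  /* isSucceed = 0 */
static void sendRsp(const Rsp* r)   { printf("rsp sent, succeed:%d\n", r->code); }

static int rejectCheckpointSource(const char* reason) {
  Rsp rsp;
  buildFailureRsp(&rsp);
  sendRsp(&rsp);
  printf("checkpoint-source ignored: %s\n", reason);
  return 0;  /* TSDB_CODE_SUCCESS analogue: a rejection is not an error */
}

int main(void) {
  int leader = 0, restored = 1;
  if (!leader)   return rejectCheckpointSource("not leader");
  if (!restored) return rejectCheckpointSource("vnode restoring");
  return 0;
}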
@@ -64,8 +64,10 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) {
  int32_t      vgId = TD_VID(pTq->pVnode);
  SStreamMeta* pMeta = pTq->pStreamMeta;

  bool alreadyRestored = pTq->pVnode->restored;

  // do not launch the stream tasks, if it is a follower or not restored vnode.
  if (!(vnodeIsRoleLeader(pTq->pVnode) && pTq->pVnode->restored)) {
  if (!(vnodeIsRoleLeader(pTq->pVnode) && alreadyRestored)) {
    return TSDB_CODE_SUCCESS;
  }

@@ -107,7 +109,9 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) {
    return -1;
  }

  tqDebug("vgId:%d create msg to start wal scan to launch stream tasks, numOfTasks:%d", vgId, numOfTasks);
  tqDebug("vgId:%d create msg to start wal scan to launch stream tasks, numOfTasks:%d, restored:%d", vgId, numOfTasks,
          alreadyRestored);

  pRunReq->head.vgId = vgId;
  pRunReq->streamId = 0;
  pRunReq->taskId = STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID;

@@ -1186,11 +1186,6 @@ const char *gFSBgTaskName[] = {NULL, "MERGE", "RETENTION", "COMPACT"};
static int32_t tsdbFSRunBgTask(void *arg) {
  STFSBgTask   *task = (STFSBgTask *)arg;
  STFileSystem *fs = task->fs;
  STFileSet    *fset;

  tsdbFSGetFSet(fs, task->fid, &fset);

  ASSERT(fset != NULL && fset->bgTaskRunning == task);

  task->launchTime = taosGetTimestampMs();
  task->run(task->arg);

@@ -1203,6 +1198,10 @@ static int32_t tsdbFSRunBgTask(void *arg) {

  taosThreadMutexLock(&fs->tsdb->mutex);

  STFileSet *fset = NULL;
  tsdbFSGetFSet(fs, task->fid, &fset);
  ASSERT(fset != NULL && fset->bgTaskRunning == task);

  // free last
  tsdbDoDoneBgTask(fs, task);
  fset->bgTaskRunning = NULL;

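The tsdbFS change deletes the file-set lookup done before the task runs and re-fetches it under fs->tsdb->mutex afterwards, presumably because the file-set layout can change while the background task executes. A minimal stand-alone illustration of that re-acquire-under-lock pattern; types and names here are illustrative only (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

typedef struct { int fid; int busy; } FileSet;

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static FileSet         sets[2] = {{1, 1}, {2, 0}};

static FileSet* getFSet(int fid) {
  for (int i = 0; i < 2; ++i)
    if (sets[i].fid == fid) return &sets[i];
  return NULL;
}

static void runBgTask(int fid) {
  /* long-running work happens unlocked; the set array may be reshuffled,
   * so a pointer cached before this point could dangle */
  /* ... task->run(task->arg) ... */

  pthread_mutex_lock(&mu);
  FileSet* fset = getFSet(fid);  /* re-acquire under the mutex */
  if (fset != NULL) fset->busy = 0;
  pthread_mutex_unlock(&mu);
}

int main(void) {
  runBgTask(1);
  printf("fid 1 busy:%d\n", sets[0].busy);  /* 0 */
  return 0;
}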
@@ -572,7 +572,12 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SBlockN

    if (isEmptyQueryTimeWindow(&w)) {
      k += 1;
      continue;

      if (k >= numOfTables) {
        break;
      } else {
        continue;
      }
    }

    // 1. time range check

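The fix checks the table cursor right after incrementing it, so the scan terminates at the last table instead of looping again with an out-of-range index. The same shape in a stand-alone toy:

#include <stdio.h>

int main(void) {
  int numOfTables = 3;
  int empty[] = {1, 1, 1};  /* every table has an empty query window */

  int k = 0, visited = 0;
  while (1) {
    if (empty[k]) {
      k += 1;
      if (k >= numOfTables) {
        break;     /* previously: plain continue, re-reading empty[k] out of range */
      } else {
        continue;
      }
    }
    visited++;     /* the time-range check etc. would go here */
    k += 1;
    if (k >= numOfTables) break;
  }
  printf("visited:%d\n", visited);  /* 0, and no out-of-bounds read */
  return 0;
}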
@@ -328,6 +328,7 @@ static int32_t sifInitParam(SNode *node, SIFParam *param, SIFCtx *ctx) {
      SIF_ERR_RET(scalarGenerateSetFromList((void **)&param->pFilter, node, nl->node.resType.type));
      if (taosHashPut(ctx->pRes, &node, POINTER_BYTES, param, sizeof(*param))) {
        taosHashCleanup(param->pFilter);
        param->pFilter = NULL;
        indexError("taosHashPut nodeList failed, size:%d", (int32_t)sizeof(*param));
        SIF_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
      }

@@ -1433,14 +1433,18 @@ static int32_t dataTypeComp(const SDataType* l, const SDataType* r) {

static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
  if (isMultiResFunc(pOp->pLeft)) {
    return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName);
    generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pLeft))->aliasName);
    return DEAL_RES_ERROR;
  }
  if (isMultiResFunc(pOp->pRight)) {
    return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pRight))->aliasName);
    generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)(pOp->pRight))->aliasName);
    return DEAL_RES_ERROR;
  }

  if (TSDB_CODE_SUCCESS != scalarGetOperatorResultType(pOp)) {
    return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pOp->node.aliasName);
  int32_t res = scalarGetOperatorResultType(pOp);
  if (TSDB_CODE_SUCCESS != res) {
    pCxt->errCode = res;
    return DEAL_RES_ERROR;
  }

  return DEAL_RES_CONTINUE;

@@ -7958,7 +7962,7 @@ static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pSt
  tNameGetFullDbName(&name, dropReq.name);
  dropReq.igNotExists = pStmt->ignoreNotExists;
  int32_t code = buildCmdMsg(pCxt, TDMT_MND_DROP_STREAM, (FSerializeFunc)tSerializeSMDropStreamReq, &dropReq);
  tFreeSMDropStreamReq(&dropReq);
  tFreeMDropStreamReq(&dropReq);
  return code;
}

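The translateOperator change stops collapsing every scalar-type failure into the generic TSDB_CODE_PAR_WRONG_VALUE_TYPE and instead records the callee's real code in the context before returning DEAL_RES_ERROR. The propagation shape in a self-contained toy (the concrete code value is a made-up placeholder):

#include <stdio.h>

typedef enum { DEAL_RES_CONTINUE, DEAL_RES_ERROR } EDealRes;
typedef struct { int errCode; } Ctx;

#define CODE_OK            0
#define CODE_TYPE_MISMATCH 0x2403  /* hypothetical value */

static int getOperatorResultType(int ok) { return ok ? CODE_OK : CODE_TYPE_MISMATCH; }

static EDealRes translateOp(Ctx* cxt, int ok) {
  int res = getOperatorResultType(ok);
  if (res != CODE_OK) {
    cxt->errCode = res;  /* surface the precise failure to the caller */
    return DEAL_RES_ERROR;
  }
  return DEAL_RES_CONTINUE;
}

int main(void) {
  Ctx cxt = {0};
  if (translateOp(&cxt, 0) == DEAL_RES_ERROR) {
    printf("errCode:0x%x\n", cxt.errCode);  /* 0x2403, not a generic code */
  }
  return 0;
}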
@@ -208,6 +208,7 @@ void sclFreeParam(SScalarParam *param) {
  if (param->columnData != NULL) {
    colDataDestroy(param->columnData);
    taosMemoryFreeClear(param->columnData);
    param->columnData = NULL;
  }

  if (param->pHashFilter != NULL) {

@@ -845,6 +846,7 @@ int32_t sclExecOperator(SOperatorNode *node, SScalarCtx *ctx, SScalarParam *outp
  SScalarParam *params = NULL;
  int32_t       rowNum = 0;
  int32_t       code = 0;
  int32_t       paramNum = 0;

  // json is not supported in the 'in' operator
  if (nodeType(node->pLeft) == QUERY_NODE_VALUE) {

@@ -865,7 +867,7 @@ int32_t sclExecOperator(SOperatorNode *node, SScalarCtx *ctx, SScalarParam *outp

  _bin_scalar_fn_t OperatorFn = getBinScalarOperatorFn(node->opType);

  int32_t paramNum = scalarGetOperatorParamNum(node->opType);
  paramNum = scalarGetOperatorParamNum(node->opType);
  SScalarParam *pLeft = &params[0];
  SScalarParam *pRight = paramNum > 1 ? &params[1] : NULL;

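paramNum is hoisted to the top of sclExecOperator, plausibly so that an error/cleanup path reached before the mid-function assignment can still reference an initialized value. A stand-alone illustration of that C idiom under that assumption:

#include <stdio.h>
#include <stdlib.h>

static int execOperator(int failEarly) {
  int  code = 0;
  int  paramNum = 0;  /* hoisted: valid even if we jump out early */
  int* params = NULL;

  if (failEarly) {
    code = -1;
    goto _return;     /* jumps before paramNum would have been set */
  }

  paramNum = 2;
  params = calloc(paramNum, sizeof(int));

_return:
  if (params != NULL) free(params);
  printf("code:%d paramNum:%d\n", code, paramNum);
  return code;
}

int main(void) {
  execOperator(1);  /* code:-1 paramNum:0 */
  execOperator(0);  /* code:0  paramNum:2 */
  return 0;
}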
@@ -18,9 +18,9 @@

#include "executor.h"
#include "query.h"
#include "tstream.h"
#include "streamBackendRocksdb.h"
#include "trpc.h"
#include "tstream.h"

#ifdef __cplusplus
extern "C" {

@@ -32,13 +32,13 @@ extern "C" {
#define MAX_RETRY_LAUNCH_HISTORY_TASK 40
#define RETRY_LAUNCH_INTERVAL_INC_RATE 1.2

#define MAX_BLOCK_NAME_NUM         1024
#define DISPATCH_RETRY_INTERVAL_MS 300
#define MAX_CONTINUE_RETRY_COUNT   5

#define META_HB_CHECK_INTERVAL    200
#define META_HB_SEND_IDLE_COUNTER 25  // send hb every 5 sec
#define STREAM_TASK_KEY_LEN       ((sizeof(int64_t)) << 1)

#define STREAM_TASK_QUEUE_CAPACITY 20480
#define STREAM_TASK_QUEUE_CAPACITY_IN_SIZE (30)

@@ -81,7 +81,8 @@ struct STokenBucket {
  double  quotaCapacity;       // available capacity for maximum input size, KiloBytes per Second
  double  quotaRemain;         // not consumed bytes per second
  double  quotaRate;           // number of token per second
  int64_t fillTimestamp;       // fill timestamp
  int64_t tokenFillTimestamp;  // timestamp of the last token refill
  int64_t quotaFillTimestamp;  // timestamp of the last quota refill
};

struct SStreamQueue {

@@ -92,13 +93,13 @@ struct SStreamQueue {
};

extern SStreamGlobalEnv streamEnv;
extern int32_t          streamBackendId;
extern int32_t          streamBackendCfWrapperId;

void    streamRetryDispatchData(SStreamTask* pTask, int64_t waitDuration);
int32_t streamDispatchStreamBlock(SStreamTask* pTask);
void    destroyDispatchMsg(SStreamDispatchReq* pReq, int32_t numOfVgroups);
int32_t getNumOfDispatchBranch(SStreamTask* pTask);

int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBlock);
SStreamDataBlock* createStreamBlockFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg);

@@ -119,19 +120,23 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask);
int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask);
int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask);

int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks, int32_t* blockSize);
int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks,
                                    int32_t* blockSize);
int32_t streamQueueItemGetSize(const SStreamQueueItem* pItem);
void    streamQueueItemIncSize(const SStreamQueueItem* pItem, int32_t size);
const char* streamQueueItemGetTypeStr(int32_t type);

SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem);

int32_t streamTaskBuildScanhistoryRspMsg(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, void** pBuffer, int32_t* pLen);
int32_t streamTaskBuildScanhistoryRspMsg(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, void** pBuffer,
                                         int32_t* pLen);
int32_t streamAddEndScanHistoryMsg(SStreamTask* pTask, SRpcHandleInfo* pRpcInfo, SStreamScanHistoryFinishReq* pReq);
int32_t streamNotifyUpstreamContinue(SStreamTask* pTask);
int32_t streamTaskFillHistoryFinished(SStreamTask* pTask);
int32_t streamTransferStateToStreamTask(SStreamTask* pTask);

void streamClearChkptReadyMsg(SStreamTask* pTask);

int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t numCap, int32_t numRate, float quotaRate, const char*);
STaskId streamTaskExtractKey(const SStreamTask* pTask);
void    streamTaskInitForLaunchHTask(SHistoryTaskInfo* pInfo);

@@ -145,17 +150,17 @@ void* streamQueueNextItem(SStreamQueue* pQueue);
void streamFreeQitem(SStreamQueueItem* data);
int32_t streamQueueGetItemSize(const SStreamQueue* pQueue);

typedef enum UPLOAD_TYPE{
typedef enum UPLOAD_TYPE {
  UPLOAD_DISABLE = -1,
  UPLOAD_S3 = 0,
  UPLOAD_RSYNC = 1,
} UPLOAD_TYPE;

UPLOAD_TYPE getUploadType();
int uploadCheckpoint(char* id, char* path);
int downloadCheckpoint(char* id, char* path);
int deleteCheckpoint(char* id);
int deleteCheckpointFile(char* id, char* name);

int32_t onNormalTaskReady(SStreamTask* pTask);
int32_t onScanhistoryTaskReady(SStreamTask* pTask);

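STokenBucket now carries separate refill timestamps for the token count and the byte quota, which lets the two limits refill on independent cadences. A self-contained sketch of a dual-rate bucket under that reading; the fields shown and all numbers are illustrative, not the engine's:

#include <stdint.h>
#include <stdio.h>

typedef struct {
  int32_t numOfToken;
  int32_t numCapacity;
  double  quotaRemain;
  double  quotaCapacity;       /* KiB */
  double  quotaRate;           /* KiB per second */
  int64_t tokenFillTimestamp;  /* last token refill, ms */
  int64_t quotaFillTimestamp;  /* last quota refill, ms */
} Bucket;

static void refill(Bucket* b, int64_t nowMs) {
  int64_t dt = nowMs - b->tokenFillTimestamp;
  if (dt >= 1000) {  /* tokens refill on whole-second boundaries */
    b->numOfToken = b->numCapacity;
    b->tokenFillTimestamp = nowMs;
  }
  dt = nowMs - b->quotaFillTimestamp;
  if (dt > 0) {      /* quota accrues continuously */
    b->quotaRemain += b->quotaRate * dt / 1000.0;
    if (b->quotaRemain > b->quotaCapacity) b->quotaRemain = b->quotaCapacity;
    b->quotaFillTimestamp = nowMs;
  }
}

static int allow(Bucket* b, int64_t nowMs, double sizeKiB) {
  refill(b, nowMs);
  if (b->numOfToken <= 0 || b->quotaRemain < sizeKiB) return 0;
  b->numOfToken -= 1;
  b->quotaRemain -= sizeKiB;
  return 1;
}

int main(void) {
  Bucket b = {.numCapacity = 10, .quotaCapacity = 500, .quotaRate = 500};
  printf("%d\n", allow(&b, 1000, 100));  /* 1: first refill grants both */
  return 0;
}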
@@ -13,9 +13,9 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "streamInt.h"
#include "rsync.h"
#include "cos.h"
#include "rsync.h"
#include "streamInt.h"

int32_t tEncodeStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckpointSourceReq* pReq) {
  if (tStartEncode(pEncoder) < 0) return -1;

@@ -122,7 +122,7 @@ static int32_t appendCheckpointIntoInputQ(SStreamTask* pTask, int32_t checkpoint
  pBlock->info.rows = 1;
  pBlock->info.childId = pTask->info.selfChildId;

  pChkpoint->blocks = taosArrayInit(4, sizeof(SSDataBlock));//pBlock;
  pChkpoint->blocks = taosArrayInit(4, sizeof(SSDataBlock));  // pBlock;
  taosArrayPush(pChkpoint->blocks, pBlock);

  taosMemoryFree(pBlock);

@@ -158,6 +158,7 @@ static int32_t continueDispatchCheckpointBlock(SStreamDataBlock* pBlock, SStream

  int32_t code = taosWriteQitem(pTask->outputq.queue->pQueue, pBlock);
  if (code == 0) {
    ASSERT(pTask->chkInfo.dispatchCheckpointTrigger == false);
    streamDispatchStreamBlock(pTask);
  } else {
    stError("s-task:%s failed to put checkpoint into outputQ, code:%s", pTask->id.idStr, tstrerror(code));

@@ -169,10 +170,10 @@ static int32_t continueDispatchCheckpointBlock(SStreamDataBlock* pBlock, SStream

int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) {
  SSDataBlock* pDataBlock = taosArrayGet(pBlock->blocks, 0);
  int64_t      checkpointId = pDataBlock->info.version;

  const char* id = pTask->id.idStr;
  int32_t     code = TSDB_CODE_SUCCESS;

  // set task status
  if (streamTaskGetStatus(pTask, NULL) != TASK_STATUS__CK) {

@@ -184,7 +185,7 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc
    }
  }

  {  // todo: remove this when the pipeline checkpoint generating is used.
    SStreamMeta* pMeta = pTask->pMeta;
    streamMetaWLock(pMeta);

@@ -195,10 +196,11 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc
    streamMetaWUnLock(pMeta);
  }

  // todo fix race condition: set the status and append checkpoint block
  int32_t taskLevel = pTask->info.taskLevel;
  if (taskLevel == TASK_LEVEL__SOURCE) {
    if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
    if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH ||
        pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
      stDebug("s-task:%s set childIdx:%d, and add checkpoint-trigger block into outputQ", id, pTask->info.selfChildId);
      continueDispatchCheckpointBlock(pBlock, pTask);
    } else {  // only one task exists, no need to dispatch downstream info

@@ -221,20 +223,21 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc
    int32_t num = taosArrayGetSize(pTask->upstreamInfo.pList);
    if (notReady > 0) {
      stDebug("s-task:%s received checkpoint block, idx:%d, %d upstream tasks not send checkpoint info yet, total:%d",
              id, pTask->info.selfChildId, notReady, num);
      streamFreeQitem((SStreamQueueItem*)pBlock);
      return code;
    }

    if (taskLevel == TASK_LEVEL__SINK) {
      stDebug("s-task:%s process checkpoint block, all %d upstreams sent checkpoint msgs, send ready msg to upstream",
              id, num);
      streamFreeQitem((SStreamQueueItem*)pBlock);
      streamTaskBuildCheckpoint(pTask);
    } else {
      stDebug(
          "s-task:%s process checkpoint block, all %d upstreams sent checkpoint msgs, dispatch checkpoint msg "
          "downstream", id, num);
          "downstream",
          id, num);

      // set the needed checked downstream tasks, only when all downstream tasks do checkpoint complete, this task
      // can start local checkpoint procedure

@@ -262,7 +265,7 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask) {

  if (notReady == 0) {
    stDebug("s-task:%s all downstream tasks have completed the checkpoint, start to do checkpoint for current task",
            pTask->id.idStr);
    appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT);
  } else {
    int32_t total = streamTaskGetNumOfDownstream(pTask);

@@ -272,13 +275,17 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask) {
    return 0;
  }

void streamTaskClearCheckInfo(SStreamTask* pTask) {
  pTask->checkpointingId = 0;  // clear the checkpoint id
void streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg) {
  pTask->checkpointingId = 0;  // clear the checkpoint id
  pTask->chkInfo.failedId = 0;
  pTask->chkInfo.startTs = 0;  // clear the recorded start time
  pTask->checkpointNotReadyTasks = 0;
  pTask->checkpointAlignCnt = 0;
  streamTaskOpenAllUpstreamInput(pTask);  // open inputQ for all upstream tasks
  pTask->chkInfo.dispatchCheckpointTrigger = false;

  streamTaskOpenAllUpstreamInput(pTask);  // open inputQ for all upstream tasks
  if (clearChkpReadyMsg) {
    streamClearChkptReadyMsg(pTask);
  }
}
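The new clearChkpReadyMsg flag lets both callers share the field reset while only the failure/reset path (tqProcessTaskResetReq passes true) also drops the buffered checkpoint-ready messages; the success path in streamSaveAllTaskStatus passes false, plausibly because those messages have already been flushed. A toy illustration with illustrative names:

#include <stdbool.h>
#include <stdio.h>

typedef struct {
  long checkpointingId;
  int  pendingReadyMsgs;
} Task;

static void clearCheckInfo(Task* t, bool clearReadyMsgs) {
  t->checkpointingId = 0;
  if (clearReadyMsgs) t->pendingReadyMsgs = 0;  /* rpc conts freed here in the engine */
}

int main(void) {
  Task a = {42, 3}, b = {43, 3};
  clearCheckInfo(&a, true);   /* task-reset path: drop ready msgs too */
  clearCheckInfo(&b, false);  /* successful checkpoint: keep them     */
  printf("%d %d\n", a.pendingReadyMsgs, b.pendingReadyMsgs);  /* 0 3 */
  return 0;
}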

int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId) {

@@ -288,7 +295,7 @@ int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId) {
  streamMetaWLock(pMeta);

  for (int32_t i = 0; i < taosArrayGetSize(pMeta->pTaskList); ++i) {
    STaskId*      pId = taosArrayGet(pMeta->pTaskList, i);
    SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, pId, sizeof(*pId));
    if (ppTask == NULL) {
      continue;

@@ -305,7 +312,7 @@ int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId) {
    p->chkInfo.checkpointId = p->checkpointingId;
    p->chkInfo.checkpointVer = p->chkInfo.processedVer;

    streamTaskClearCheckInfo(p);
    streamTaskClearCheckInfo(p, false);

    char* str = NULL;
    streamTaskGetStatus(p, &str);

@@ -315,7 +322,7 @@ int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId) {
      stDebug("s-task:%s vgId:%d save task status failed, since handle event failed", p->id.idStr, vgId);
      streamMetaWUnLock(pMeta);
      return -1;
    } else {  // save the task
      streamMetaSaveTask(pMeta, p);
    }

@@ -372,33 +379,32 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
  if (code != TSDB_CODE_SUCCESS) {
    // todo: let's retry send rsp to upstream/mnode
    stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%" PRId64 ", code:%s", pTask->id.idStr,
            pTask->checkpointingId, tstrerror(code));
  }

  return code;
}

static int uploadCheckpointToS3(char* id, char* path){
static int uploadCheckpointToS3(char* id, char* path) {
  TdDirPtr pDir = taosOpenDir(path);
  if (pDir == NULL) return -1;

  TdDirEntryPtr de = NULL;
  while ((de = taosReadDir(pDir)) != NULL) {
    char* name = taosGetDirEntryName(de);
    if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0 ||
        taosDirEntryIsDir(de)) continue;
    if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0 || taosDirEntryIsDir(de)) continue;

    char filename[PATH_MAX] = {0};
    if(path[strlen(path) - 1] == TD_DIRSEP_CHAR){
    if (path[strlen(path) - 1] == TD_DIRSEP_CHAR) {
      snprintf(filename, sizeof(filename), "%s%s", path, name);
    }else{
    } else {
      snprintf(filename, sizeof(filename), "%s%s%s", path, TD_DIRSEP, name);
    }

    char object[PATH_MAX] = {0};
    snprintf(object, sizeof(object), "%s%s%s", id, TD_DIRSEP, name);

    if(s3PutObjectFromFile2(filename, object) != 0){
    if (s3PutObjectFromFile2(filename, object) != 0) {
      taosCloseDir(&pDir);
      return -1;
    }

@@ -409,59 +415,59 @@ static int uploadCheckpointToS3(char* id, char* path){
  return 0;
}

UPLOAD_TYPE getUploadType(){
if(strlen(tsSnodeAddress) != 0){
UPLOAD_TYPE getUploadType() {
  if (strlen(tsSnodeAddress) != 0) {
    return UPLOAD_RSYNC;
  }else if(tsS3StreamEnabled){
  } else if (tsS3StreamEnabled) {
    return UPLOAD_S3;
  }else{
  } else {
    return UPLOAD_DISABLE;
  }
}

int uploadCheckpoint(char* id, char* path){
if(id == NULL || path == NULL || strlen(id) == 0 || strlen(path) == 0 || strlen(path) >= PATH_MAX){
int uploadCheckpoint(char* id, char* path) {
  if (id == NULL || path == NULL || strlen(id) == 0 || strlen(path) == 0 || strlen(path) >= PATH_MAX) {
    stError("uploadCheckpoint parameters invalid");
    return -1;
  }
  if(strlen(tsSnodeAddress) != 0){
  if (strlen(tsSnodeAddress) != 0) {
    return uploadRsync(id, path);
  }else if(tsS3StreamEnabled){
  } else if (tsS3StreamEnabled) {
    return uploadCheckpointToS3(id, path);
  }
  return 0;
}

int downloadCheckpoint(char* id, char* path){
if(id == NULL || path == NULL || strlen(id) == 0 || strlen(path) == 0 || strlen(path) >= PATH_MAX){
int downloadCheckpoint(char* id, char* path) {
  if (id == NULL || path == NULL || strlen(id) == 0 || strlen(path) == 0 || strlen(path) >= PATH_MAX) {
    stError("downloadCheckpoint parameters invalid");
    return -1;
  }
  if(strlen(tsSnodeAddress) != 0){
  if (strlen(tsSnodeAddress) != 0) {
    return downloadRsync(id, path);
  }else if(tsS3StreamEnabled){
  } else if (tsS3StreamEnabled) {
    return s3GetObjectsByPrefix(id, path);
  }
  return 0;
}

int deleteCheckpoint(char* id){
if(id == NULL || strlen(id) == 0){
int deleteCheckpoint(char* id) {
  if (id == NULL || strlen(id) == 0) {
    stError("deleteCheckpoint parameters invalid");
    return -1;
  }
  if(strlen(tsSnodeAddress) != 0){
  if (strlen(tsSnodeAddress) != 0) {
    return deleteRsync(id);
  }else if(tsS3StreamEnabled){
  } else if (tsS3StreamEnabled) {
    s3DeleteObjectsByPrefix(id);
  }
  return 0;
}

int deleteCheckpointFile(char* id, char* name){
int deleteCheckpointFile(char* id, char* name) {
  char object[128] = {0};
  snprintf(object, sizeof(object), "%s/%s", id, name);
  char *tmp = object;
  char* tmp = object;
  s3DeleteObjects((const char**)&tmp, 1);
  return 0;
}

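getUploadType() picks the checkpoint transport in a fixed order: a configured snode address selects rsync, otherwise S3 if stream-to-S3 is enabled, otherwise uploads are disabled. The same selection in a stand-alone toy; the two globals below are illustrative stand-ins for tsSnodeAddress and tsS3StreamEnabled:

#include <stdio.h>
#include <string.h>

typedef enum { UP_DISABLE = -1, UP_S3 = 0, UP_RSYNC = 1 } UploadType;

static char snodeAddress[64] = "";  /* tsSnodeAddress analogue    */
static int  s3StreamEnabled = 0;    /* tsS3StreamEnabled analogue */

static UploadType uploadType(void) {
  if (strlen(snodeAddress) != 0) return UP_RSYNC;
  if (s3StreamEnabled) return UP_S3;
  return UP_DISABLE;
}

int main(void) {
  printf("%d\n", uploadType());  /* -1: nothing configured */
  s3StreamEnabled = 1;
  printf("%d\n", uploadType());  /* 0: S3 */
  snprintf(snodeAddress, sizeof(snodeAddress), "snode:6050");
  printf("%d\n", uploadType());  /* 1: rsync wins over S3 */
  return 0;
}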
@ -14,9 +14,9 @@
|
|||
*/
|
||||
|
||||
#include "streamInt.h"
|
||||
#include "tmisce.h"
|
||||
#include "trpc.h"
|
||||
#include "ttimer.h"
|
||||
#include "tmisce.h"
|
||||
|
||||
typedef struct SBlockName {
|
||||
uint32_t hashValue;
|
||||
|
@ -231,7 +231,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
|
|||
|
||||
buf = NULL;
|
||||
stDebug("s-task:%s (child %d) send retrieve req to task:0x%x (vgId:%d), reqId:0x%" PRIx64, pTask->id.idStr,
|
||||
pTask->info.selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req.reqId);
|
||||
pTask->info.selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req.reqId);
|
||||
}
|
||||
code = 0;
|
||||
|
||||
|
@ -270,7 +270,7 @@ int32_t streamSendCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq,
|
|||
|
||||
initRpcMsg(&msg, TDMT_VND_STREAM_TASK_CHECK, buf, tlen + sizeof(SMsgHead));
|
||||
stDebug("s-task:%s (level:%d) send check msg to s-task:0x%" PRIx64 ":0x%x (vgId:%d)", pTask->id.idStr,
|
||||
pTask->info.taskLevel, pReq->streamId, pReq->downstreamTaskId, nodeId);
|
||||
pTask->info.taskLevel, pReq->streamId, pReq->downstreamTaskId, nodeId);
|
||||
|
||||
tmsgSendReq(pEpSet, &msg);
|
||||
return 0;
|
||||
|
@ -343,7 +343,8 @@ static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pD
|
|||
SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i);
|
||||
|
||||
// TODO: do not use broadcast
|
||||
if (pDataBlock->info.type == STREAM_DELETE_RESULT || pDataBlock->info.type == STREAM_CHECKPOINT || pDataBlock->info.type == STREAM_TRANS_STATE) {
|
||||
if (pDataBlock->info.type == STREAM_DELETE_RESULT || pDataBlock->info.type == STREAM_CHECKPOINT ||
|
||||
pDataBlock->info.type == STREAM_TRANS_STATE) {
|
||||
for (int32_t j = 0; j < numOfVgroups; j++) {
|
||||
code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j]);
|
||||
if (code != 0) {
|
||||
|
@ -362,7 +363,7 @@ static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pD
|
|||
}
|
||||
|
||||
code = streamSearchAndAddBlock(pTask, pReqs, pDataBlock, numOfVgroups, pDataBlock->info.id.groupId);
|
||||
if(code != 0) {
|
||||
if (code != 0) {
|
||||
destroyDispatchMsg(pReqs, numOfVgroups);
|
||||
return code;
|
||||
}
|
||||
|
@ -371,13 +372,14 @@ static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pD
|
|||
pTask->msgInfo.pData = pReqs;
|
||||
}
|
||||
|
||||
stDebug("s-task:%s build dispatch msg success, msgId:%d, stage:%" PRId64, pTask->id.idStr, pTask->execInfo.dispatch, pTask->pMeta->stage);
|
||||
stDebug("s-task:%s build dispatch msg success, msgId:%d, stage:%" PRId64, pTask->id.idStr, pTask->execInfo.dispatch,
|
||||
pTask->pMeta->stage);
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t sendDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pDispatchMsg) {
|
||||
int32_t code = 0;
|
||||
int32_t msgId = pTask->execInfo.dispatch;
|
||||
int32_t code = 0;
|
||||
int32_t msgId = pTask->execInfo.dispatch;
|
||||
const char* id = pTask->id.idStr;
|
||||
|
||||
if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
|
||||
|
@ -393,8 +395,8 @@ static int32_t sendDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pDispatch
|
|||
SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos;
|
||||
int32_t numOfVgroups = taosArrayGetSize(vgInfo);
|
||||
|
||||
stDebug("s-task:%s (child taskId:%d) start to shuffle-dispatch blocks to %d vgroup(s), msgId:%d",
|
||||
id, pTask->info.selfChildId, numOfVgroups, msgId);
|
||||
stDebug("s-task:%s (child taskId:%d) start to shuffle-dispatch blocks to %d vgroup(s), msgId:%d", id,
|
||||
pTask->info.selfChildId, numOfVgroups, msgId);
|
||||
|
||||
for (int32_t i = 0; i < numOfVgroups; i++) {
|
||||
if (pDispatchMsg[i].blockNum > 0) {
|
||||
|
@ -409,7 +411,8 @@ static int32_t sendDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pDispatch
|
|||
}
|
||||
}
|
||||
|
||||
stDebug("s-task:%s complete shuffle-dispatch blocks to all %d vnodes, msgId:%d", pTask->id.idStr, numOfVgroups, msgId);
|
||||
stDebug("s-task:%s complete shuffle-dispatch blocks to all %d vnodes, msgId:%d", pTask->id.idStr, numOfVgroups,
|
||||
msgId);
|
||||
}
|
||||
|
||||
return code;
|
||||
|
@ -434,20 +437,20 @@ static void doRetryDispatchData(void* param, void* tmrId) {
|
|||
SArray* pList = taosArrayDup(pTask->msgInfo.pRetryList, NULL);
|
||||
taosArrayClear(pTask->msgInfo.pRetryList);
|
||||
|
||||
SStreamDispatchReq *pReq = pTask->msgInfo.pData;
|
||||
SStreamDispatchReq* pReq = pTask->msgInfo.pData;
|
||||
|
||||
if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
|
||||
SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos;
|
||||
int32_t numOfVgroups = taosArrayGetSize(vgInfo);
|
||||
|
||||
int32_t numOfFailed = taosArrayGetSize(pList);
|
||||
stDebug("s-task:%s (child taskId:%d) retry shuffle-dispatch blocks to %d vgroup(s), msgId:%d",
|
||||
id, pTask->info.selfChildId, numOfFailed, msgId);
|
||||
stDebug("s-task:%s (child taskId:%d) retry shuffle-dispatch blocks to %d vgroup(s), msgId:%d", id,
|
||||
pTask->info.selfChildId, numOfFailed, msgId);
|
||||
|
||||
for (int32_t i = 0; i < numOfFailed; i++) {
|
||||
int32_t vgId = *(int32_t*) taosArrayGet(pList, i);
|
||||
int32_t vgId = *(int32_t*)taosArrayGet(pList, i);
|
||||
|
||||
for(int32_t j = 0; j < numOfVgroups; ++j) {
|
||||
for (int32_t j = 0; j < numOfVgroups; ++j) {
|
||||
SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j);
|
||||
if (pVgInfo->vgId == vgId) {
|
||||
stDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d", pTask->id.idStr,
|
||||
|
@ -461,7 +464,8 @@ static void doRetryDispatchData(void* param, void* tmrId) {
|
|||
}
|
||||
}
|
||||
|
||||
stDebug("s-task:%s complete re-try shuffle-dispatch blocks to all %d vnodes, msgId:%d", pTask->id.idStr, numOfFailed, msgId);
|
||||
stDebug("s-task:%s complete re-try shuffle-dispatch blocks to all %d vnodes, msgId:%d", pTask->id.idStr,
|
||||
numOfFailed, msgId);
|
||||
} else {
|
||||
int32_t vgId = pTask->outputInfo.fixedDispatcher.nodeId;
|
||||
SEpSet* pEpSet = &pTask->outputInfo.fixedDispatcher.epSet;
|
||||
|
@ -478,8 +482,8 @@ static void doRetryDispatchData(void* param, void* tmrId) {
|
|||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
if (!streamTaskShouldStop(pTask)) {
|
||||
// stDebug("s-task:%s reset the waitRspCnt to be 0 before launch retry dispatch", pTask->id.idStr);
|
||||
// atomic_store_32(&pTask->outputInfo.shuffleDispatcher.waitingRspCnt, 0);
|
||||
// stDebug("s-task:%s reset the waitRspCnt to be 0 before launch retry dispatch", pTask->id.idStr);
|
||||
// atomic_store_32(&pTask->outputInfo.shuffleDispatcher.waitingRspCnt, 0);
|
||||
if (streamTaskShouldPause(pTask)) {
|
||||
streamRetryDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS * 10);
|
||||
} else {
|
||||
|
@ -531,10 +535,12 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S
|
|||
}
|
||||
|
||||
if (pDataBlock->info.parTbName[0]) {
|
||||
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->outputInfo.shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
|
||||
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->outputInfo.shuffleDispatcher.dbInfo.db,
|
||||
pDataBlock->info.parTbName);
|
||||
} else {
|
||||
buildCtbNameByGroupIdImpl(pTask->outputInfo.shuffleDispatcher.stbFullName, groupId, pDataBlock->info.parTbName);
|
||||
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->outputInfo.shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
|
||||
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->outputInfo.shuffleDispatcher.dbInfo.db,
|
||||
pDataBlock->info.parTbName);
|
||||
}
|
||||
|
||||
/*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
|
||||
|
@ -576,13 +582,15 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S
|
|||
}
|
||||
|
||||
int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
|
||||
ASSERT((pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH));
|
||||
ASSERT((pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH ||
|
||||
pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH));
|
||||
|
||||
const char* id = pTask->id.idStr;
|
||||
int32_t numOfElems = streamQueueGetNumOfItems(pTask->outputq.queue);
|
||||
int32_t numOfElems = streamQueueGetNumOfItems(pTask->outputq.queue);
|
||||
if (numOfElems > 0) {
|
||||
double size = SIZE_IN_MiB(taosQueueMemorySize(pTask->outputq.queue->pQueue));
|
||||
stDebug("s-task:%s start to dispatch intermediate block to downstream, elem in outputQ:%d, size:%.2fMiB", id, numOfElems, size);
|
||||
stDebug("s-task:%s start to dispatch intermediate block to downstream, elem in outputQ:%d, size:%.2fMiB", id,
|
||||
numOfElems, size);
|
||||
}
|
||||
|
||||
// to make sure only one dispatch is running
|
||||
|
@ -593,6 +601,12 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
if (pTask->chkInfo.dispatchCheckpointTrigger) {
|
||||
stDebug("s-task:%s already send checkpoint trigger, not dispatch anymore", id);
|
||||
atomic_store_8(&pTask->outputq.status, TASK_OUTPUT_STATUS__NORMAL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ASSERT(pTask->msgInfo.pData == NULL);
|
||||
stDebug("s-task:%s start to dispatch msg, set output status:%d", id, pTask->outputq.status);
|
||||
|
||||
|
@ -612,7 +626,7 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
|
|||
int32_t code = doBuildDispatchMsg(pTask, pBlock);
|
||||
if (code == 0) {
|
||||
destroyStreamDataBlock(pBlock);
|
||||
} else { // todo handle build dispatch msg failed
|
||||
} else { // todo handle build dispatch msg failed
|
||||
}
|
||||
|
||||
int32_t retryCount = 0;
|
||||
|
@ -635,8 +649,9 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
|
|||
|
||||
if (++retryCount > MAX_CONTINUE_RETRY_COUNT) { // add to timer to retry
|
||||
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
|
||||
stDebug("s-task:%s failed to dispatch msg to downstream for %d times, code:%s, add timer to retry in %dms, ref:%d",
|
||||
pTask->id.idStr, retryCount, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS, ref);
|
||||
stDebug(
|
||||
"s-task:%s failed to dispatch msg to downstream for %d times, code:%s, add timer to retry in %dms, ref:%d",
|
||||
pTask->id.idStr, retryCount, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS, ref);
|
||||
|
||||
streamRetryDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS);
|
||||
break;
|
||||
|
@ -659,7 +674,8 @@ int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask) {
|
|||
if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
req.downstreamTaskId = pTask->outputInfo.fixedDispatcher.taskId;
pTask->notReadyTasks = 1;
doDispatchScanHistoryFinishMsg(pTask, &req, pTask->outputInfo.fixedDispatcher.nodeId, &pTask->outputInfo.fixedDispatcher.epSet);
doDispatchScanHistoryFinishMsg(pTask, &req, pTask->outputInfo.fixedDispatcher.nodeId,
&pTask->outputInfo.fixedDispatcher.epSet);
} else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos;
int32_t numOfVgs = taosArrayGetSize(vgInfo);

@ -667,8 +683,8 @@ int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask) {
char* p = NULL;
streamTaskGetStatus(pTask, &p);
stDebug("s-task:%s send scan-history data complete msg to downstream (shuffle-dispatch) %d tasks, status:%s", pTask->id.idStr,
numOfVgs, p);
stDebug("s-task:%s send scan-history data complete msg to downstream (shuffle-dispatch) %d tasks, status:%s",
pTask->id.idStr, numOfVgs, p);
for (int32_t i = 0; i < numOfVgs; i++) {
SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
req.downstreamTaskId = pVgInfo->taskId;

@ -692,11 +708,12 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) {
tmsgSendReq(&pInfo->upstreamNodeEpset, &pInfo->msg);

stDebug("s-task:%s level:%d checkpoint ready msg sent to upstream:0x%x", pTask->id.idStr, pTask->info.taskLevel,
pInfo->upStreamTaskId);
pInfo->upStreamTaskId);
}

taosArrayClear(pTask->pReadyMsgList);
stDebug("s-task:%s level:%d checkpoint ready msg sent to all %d upstreams", pTask->id.idStr, pTask->info.taskLevel, num);
stDebug("s-task:%s level:%d checkpoint ready msg sent to all %d upstreams", pTask->id.idStr, pTask->info.taskLevel,
num);

return TSDB_CODE_SUCCESS;
}

@ -783,7 +800,7 @@ int32_t doDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamScanHist
char* p = NULL;
streamTaskGetStatus(pTask, &p);
stDebug("s-task:%s status:%s dispatch scan-history finish msg to taskId:0x%x (vgId:%d)", pTask->id.idStr, p,
pReq->downstreamTaskId, vgId);
pReq->downstreamTaskId, vgId);
return 0;
}

@ -866,8 +883,8 @@ int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInf
return 0;
}

int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo,
SStreamTask* pTask, int8_t isSucceed) {
int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,
int8_t isSucceed) {
SStreamChkptReadyInfo info = {0};
buildCheckpointSourceRsp(pReq, pRpcInfo, &info.msg, isSucceed);

@ -876,7 +893,8 @@ int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHa
}

taosArrayPush(pTask->pReadyMsgList, &info);
stDebug("s-task:%s add checkpoint source rsp msg, total:%d", pTask->id.idStr, (int32_t)taosArrayGetSize(pTask->pReadyMsgList));
stDebug("s-task:%s add checkpoint source rsp msg, total:%d", pTask->id.idStr,
(int32_t)taosArrayGetSize(pTask->pReadyMsgList));
return TSDB_CODE_SUCCESS;
}

@ -926,8 +944,10 @@ int32_t streamAddCheckpointReadyMsg(SStreamTask* pTask, int32_t upstreamTaskId,
initRpcMsg(&info.msg, TDMT_STREAM_TASK_CHECKPOINT_READY, buf, tlen + sizeof(SMsgHead));
info.msg.info.noResp = 1; // refactor later.

stDebug("s-task:%s (level:%d) prepare checkpoint ready msg to upstream s-task:0x%" PRIx64 ":0x%x (vgId:%d) idx:%d, vgId:%d",
pTask->id.idStr, pTask->info.taskLevel, req.streamId, req.upstreamTaskId, req.upstreamNodeId, index, req.upstreamNodeId);
stDebug("s-task:%s (level:%d) prepare checkpoint ready msg to upstream s-task:0x%" PRIx64
":0x%x (vgId:%d) idx:%d, vgId:%d",
pTask->id.idStr, pTask->info.taskLevel, req.streamId, req.upstreamTaskId, req.upstreamNodeId, index,
req.upstreamNodeId);

if (pTask->pReadyMsgList == NULL) {
pTask->pReadyMsgList = taosArrayInit(4, sizeof(SStreamChkptReadyInfo));

@ -937,6 +957,16 @@ int32_t streamAddCheckpointReadyMsg(SStreamTask* pTask, int32_t upstreamTaskId,
return 0;
}

void streamClearChkptReadyMsg(SStreamTask* pTask) {
if (pTask->pReadyMsgList == NULL) return;

for (int i = 0; i < taosArrayGetSize(pTask->pReadyMsgList); i++) {
SStreamChkptReadyInfo* pInfo = taosArrayGet(pTask->pReadyMsgList, i);
rpcFreeCont(pInfo->msg.pCont);
}
taosArrayClear(pTask->pReadyMsgList);
}
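Note that the new streamClearChkptReadyMsg frees each pending rpc payload (rpcFreeCont) before clearing the list; the tFreeStreamTask hunk further down now calls it right before taosArrayDestroy(pTask->pReadyMsgList), so checkpoint-ready messages still queued when a task is freed no longer appear to leak their conts.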
int32_t tEncodeCompleteHistoryDataMsg(SEncoder* pEncoder, const SStreamCompleteHistoryMsg* pReq) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;

@ -959,7 +989,8 @@ int32_t tDecodeCompleteHistoryDataMsg(SDecoder* pDecoder, SStreamCompleteHistory
return 0;
}

int32_t streamTaskBuildScanhistoryRspMsg(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, void** pBuffer, int32_t* pLen) {
int32_t streamTaskBuildScanhistoryRspMsg(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, void** pBuffer,
int32_t* pLen) {
int32_t len = 0;
int32_t code = 0;
SEncoder encoder;

@ -1016,7 +1047,7 @@ int32_t streamAddEndScanHistoryMsg(SStreamTask* pTask, SRpcHandleInfo* pRpcInfo,

int32_t num = taosArrayGetSize(pTask->pRspMsgList);
stDebug("s-task:%s add scan-history finish rsp msg for task:0x%x, total:%d", pTask->id.idStr, pReq->upstreamTaskId,
num);
num);
return TSDB_CODE_SUCCESS;
}

@ -1039,30 +1070,14 @@ int32_t streamNotifyUpstreamContinue(SStreamTask* pTask) {
return 0;
}

static void dispatchDataInFuture(void* param, void* tmrId) {
SStreamTask* pTask = param;
if (streamTaskShouldStop(pTask)) {
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
stDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
return;
}

ETaskStatus status = streamTaskGetStatus(pTask, NULL);
if (status == TASK_STATUS__CK) {
stDebug("s-task:%s in checkpoint status, wait for 500ms to dispatch data downstream", pTask->id.idStr);
taosTmrReset(dispatchDataInFuture, 500, pTask, streamEnv.timer, &pTask->msgInfo.pTimer);
} else {
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
stDebug("s-task:%s start to dispatch data, jump out of timer, ref:%d", pTask->id.idStr, ref);
streamDispatchStreamBlock(pTask);
}
}

// this message has been sent successfully, let's try next one.
static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId) {
destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));

bool delayDispatch = (pTask->msgInfo.dispatchMsgType == STREAM_INPUT__CHECKPOINT_TRIGGER);
if (delayDispatch) {
pTask->chkInfo.dispatchCheckpointTrigger = true;
}

pTask->msgInfo.pData = NULL;
pTask->msgInfo.dispatchMsgType = 0;

@ -1075,7 +1090,7 @@ static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId
stDebug("s-task:%s downstream task:0x%x resume to normal from inputQ blocking, blocking time:%" PRId64 "ms",
pTask->id.idStr, downstreamId, el);
} else {
stDebug("s-task:%s dispatch completed, elapsed time:%"PRId64"ms", pTask->id.idStr, el);
stDebug("s-task:%s dispatch completed, elapsed time:%" PRId64 "ms", pTask->id.idStr, el);
}

// now ready for next data output

@ -1083,13 +1098,7 @@ static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId

// otherwise, continue dispatch the first block to down stream task in pipeline
if (delayDispatch) {
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
stDebug("s-task:%s in checkpoint status, add in timer, try dispatch data in 500ms, ref:%d", pTask->id.idStr, ref);
if (pTask->msgInfo.pTimer != NULL) {
taosTmrReset(dispatchDataInFuture, 500, pTask, streamEnv.timer, &pTask->msgInfo.pTimer);
} else {
pTask->msgInfo.pTimer = taosTmrStart(dispatchDataInFuture, 500, pTask, streamEnv.timer);
}
return 0;
} else {
streamDispatchStreamBlock(pTask);
}
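These hunks cover the deferred-dispatch path: when a dispatch completes while the task is in checkpoint status (delayDispatch), the task takes a timerActive ref and parks the next dispatch on a 500 ms timer (taosTmrStart the first time, taosTmrReset on re-arm), and dispatchDataInFuture either re-arms or finally dispatches and drops the ref. A rough, self-contained illustration of that re-arming retry pattern, with a polling loop standing in for the taosTmr* API and hypothetical names throughout (not the committed code):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

typedef enum { TASK_READY, TASK_CHECKPOINTING, TASK_STOPPED } Status;

typedef struct {
  Status status;
  int    timerActive;  // ref held while a retry is pending
} Task;

static void dispatch(Task *t) { printf("dispatching block downstream\n"); }

// One timer tick: either re-arm (keep the ref) or dispatch and drop the ref.
static bool onTimer(Task *t) {
  if (t->status == TASK_STOPPED) {
    t->timerActive--;  // abort from timer
    return false;
  }
  if (t->status == TASK_CHECKPOINTING) {
    return true;       // re-arm: retry again in 500 ms
  }
  t->timerActive--;    // jump out of timer
  dispatch(t);
  return false;
}

int main(void) {
  Task t = {.status = TASK_CHECKPOINTING, .timerActive = 1};
  for (int tick = 0;; tick++) {
    if (tick == 2) t.status = TASK_READY;  // pretend checkpoint finishes on the 3rd tick
    if (!onTimer(&t)) break;
    usleep(500 * 1000);  // stand-in for taosTmrReset(..., 500, ...)
  }
  return 0;
}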
@ -1100,11 +1109,19 @@ static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) {
const char* id = pTask->id.idStr;
int32_t vgId = pTask->pMeta->vgId;
int32_t msgId = pTask->execInfo.dispatch;
int32_t msgId = pTask->execInfo.dispatch;

#if 0
// for test purpose, build the failure case
if (pTask->msgInfo.dispatchMsgType == STREAM_INPUT__CHECKPOINT_TRIGGER) {
pRsp->inputStatus = TASK_INPUT_STATUS__REFUSED;
}
#endif

// follower not handle the dispatch rsp
if ((pTask->pMeta->role == NODE_ROLE_FOLLOWER) || (pTask->status.downstreamReady != 1)) {
stError("s-task:%s vgId:%d is follower or task just re-launched, not handle the dispatch rsp, discard it", id, vgId);
stError("s-task:%s vgId:%d is follower or task just re-launched, not handle the dispatch rsp, discard it", id,
vgId);
return TSDB_CODE_STREAM_TASK_NOT_EXIST;
}

@ -1122,8 +1139,8 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
// flag. Here we need to retry dispatch this message to downstream task immediately. handle the case the failure
// happened too fast.
if (code == TSDB_CODE_STREAM_TASK_NOT_EXIST) { // destination task does not exist, not retry anymore
stError("s-task:%s failed to dispatch msg to task:0x%x(vgId:%d), msgId:%d no retry, since task destroyed already", id,
pRsp->downstreamTaskId, pRsp->downstreamNodeId, msgId);
stError("s-task:%s failed to dispatch msg to task:0x%x(vgId:%d), msgId:%d no retry, since task destroyed already",
id, pRsp->downstreamTaskId, pRsp->downstreamNodeId, msgId);
} else {
stError("s-task:%s failed to dispatch msgId:%d to task:0x%x(vgId:%d), code:%s, add to retry list", id, msgId,
pRsp->downstreamTaskId, pRsp->downstreamNodeId, tstrerror(code));

@ -1143,8 +1160,21 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
stWarn("s-task:%s inputQ of downstream task:0x%x(vgId:%d) is full, wait for %dms and retry dispatch", id,
pRsp->downstreamTaskId, pRsp->downstreamNodeId, DISPATCH_RETRY_INTERVAL_MS);
} else if (pRsp->inputStatus == TASK_INPUT_STATUS__REFUSED) {
stError("s-task:%s downstream task:0x%x(vgId:%d) refused the dispatch msg, treat it as success", id,
pRsp->downstreamTaskId, pRsp->downstreamNodeId);
// todo handle the agg task failure, add test case
if (pTask->msgInfo.dispatchMsgType == STREAM_INPUT__CHECKPOINT_TRIGGER &&
pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
stError("s-task:%s failed to dispatch checkpoint-trigger msg, checkpointId:%" PRId64
", set the current checkpoint failed, and send rsp to mnode",
id, pTask->checkpointingId);
{ // send checkpoint failure msg to mnode directly
pTask->chkInfo.failedId = pTask->checkpointingId; // record the latest failed checkpoint id
pTask->checkpointingId = pTask->checkpointingId;
streamTaskSendCheckpointSourceRsp(pTask);
}
} else {
stError("s-task:%s downstream task:0x%x(vgId:%d) refused the dispatch msg, treat it as success", id,
pRsp->downstreamTaskId, pRsp->downstreamNodeId);
}
}
}
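One detail worth flagging in the new REFUSED branch: `pTask->checkpointingId = pTask->checkpointingId;` is a self-assignment and therefore a no-op; the load-bearing parts here appear to be recording failedId and sending the checkpoint-source rsp to the mnode.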
@ -1154,16 +1184,18 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
ASSERT(leftRsp >= 0);

if (leftRsp > 0) {
stDebug( "s-task:%s recv dispatch rsp, msgId:%d from 0x%x(vgId:%d), downstream task input status:%d code:%s, waiting for %d rsp",
id, msgId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->inputStatus, tstrerror(code), leftRsp);
stDebug(
"s-task:%s recv dispatch rsp, msgId:%d from 0x%x(vgId:%d), downstream task input status:%d code:%s, waiting "
"for %d rsp",
id, msgId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->inputStatus, tstrerror(code), leftRsp);
} else {
stDebug(
"s-task:%s recv dispatch rsp, msgId:%d from 0x%x(vgId:%d), downstream task input status:%d code:%s, all rsp",
id, msgId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->inputStatus, tstrerror(code));
}
} else {
stDebug("s-task:%s recv fix-dispatch rsp, msgId:%d from 0x%x(vgId:%d), downstream task input status:%d code:%s",
id, msgId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->inputStatus, tstrerror(code));
stDebug("s-task:%s recv fix-dispatch rsp, msgId:%d from 0x%x(vgId:%d), downstream task input status:%d code:%s", id,
msgId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->inputStatus, tstrerror(code));
}

ASSERT(leftRsp >= 0);

@ -1185,7 +1217,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
pTask->id.idStr, DISPATCH_RETRY_INTERVAL_MS, ref);

streamRetryDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS);
} else { // this message has been sent successfully, let's try next one.
} else { // this message has been sent successfully, let's try next one.
pTask->msgInfo.retryCount = 0;

// transtate msg has been sent to downstream successfully. let's transfer the fill-history task state


@ -48,6 +48,7 @@ static int32_t doOutputResultBlockImpl(SStreamTask* pTask, SStreamDataBlock* pBl
return code;
}

// checkpoint trigger will be checked
streamDispatchStreamBlock(pTask);
}


@ -388,32 +388,36 @@ int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t numCap, int32_t
pBucket->quotaCapacity = quotaRate * MAX_SMOOTH_BURST_RATIO;
pBucket->quotaRemain = pBucket->quotaCapacity;

pBucket->fillTimestamp = taosGetTimestampMs();
pBucket->tokenFillTimestamp = taosGetTimestampMs();
pBucket->quotaFillTimestamp = taosGetTimestampMs();
stDebug("s-task:%s sink quotaRate:%.2fMiB, numRate:%d", id, quotaRate, numRate);
return TSDB_CODE_SUCCESS;
}

static void fillTokenBucket(STokenBucket* pBucket, const char* id) {
int64_t now = taosGetTimestampMs();
int64_t delta = now - pBucket->fillTimestamp;

int64_t deltaToken = now - pBucket->tokenFillTimestamp;
ASSERT(pBucket->numOfToken >= 0);

int32_t incNum = (delta / 1000.0) * pBucket->numRate;
int32_t incNum = (deltaToken / 1000.0) * pBucket->numRate;
if (incNum > 0) {
pBucket->numOfToken = TMIN(pBucket->numOfToken + incNum, pBucket->numCapacity);
pBucket->fillTimestamp = now;
pBucket->tokenFillTimestamp = now;
}

// increase the new available quota as time goes on
double incSize = (delta / 1000.0) * pBucket->quotaRate;
int64_t deltaQuota = now - pBucket->quotaFillTimestamp;
double incSize = (deltaQuota / 1000.0) * pBucket->quotaRate;
if (incSize > 0) {
pBucket->quotaRemain = TMIN(pBucket->quotaRemain + incSize, pBucket->quotaCapacity);
pBucket->fillTimestamp = now;
pBucket->quotaFillTimestamp = now;
}

if (incNum > 0 || incSize > 0) {
stTrace("token/quota available, token:%d inc:%d, quota:%.2fMiB inc:%.3fMiB, ts:%" PRId64 " idle:%" PRId64 "ms, %s",
pBucket->numOfToken, incNum, pBucket->quotaRemain, incSize, now, delta, id);
stTrace("token/quota available, token:%d inc:%d, token_TsDelta:%" PRId64
", quota:%.2fMiB inc:%.3fMiB quotaTs:%" PRId64 " now:%" PRId64 "ms, %s",
pBucket->numOfToken, incNum, deltaToken, pBucket->quotaRemain, incSize, deltaQuota, now, id);
}
}
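This is the functional fix in this hunk: the old code kept one shared fillTimestamp, so whichever refill fired first (token or quota) reset the clock for both and could starve the other; token and quota now keep independent last-fill timestamps. A minimal sketch of the resulting dual-clock refill, with simplified stand-in fields rather than the real STokenBucket:

#include <stdint.h>

typedef struct {
  int32_t tokens, tokenCap;          // discrete dispatch tokens
  double  quota, quotaCap;           // data quota in MiB
  int32_t tokenRate;                 // tokens per second
  double  quotaRate;                 // MiB per second
  int64_t tokenFillTs, quotaFillTs;  // independent last-fill timestamps (ms)
} Bucket;

static double dmin(double a, double b) { return a < b ? a : b; }

void refill(Bucket *b, int64_t nowMs) {
  int32_t incTokens = (int32_t)((nowMs - b->tokenFillTs) / 1000.0 * b->tokenRate);
  if (incTokens > 0) {
    b->tokens = (b->tokens + incTokens > b->tokenCap) ? b->tokenCap : b->tokens + incTokens;
    b->tokenFillTs = nowMs;  // only the token clock advances here
  }
  double incQuota = (nowMs - b->quotaFillTs) / 1000.0 * b->quotaRate;
  if (incQuota > 0) {
    b->quota = dmin(b->quota + incQuota, b->quotaCap);
    b->quotaFillTs = nowMs;  // only the quota clock advances here
  }
}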

@ -168,7 +168,6 @@ int32_t streamTaskStartScanHistory(SStreamTask* pTask) {
} else if (level == TASK_LEVEL__AGG) {
if (pTask->info.fillHistory) {
streamSetParamForScanHistory(pTask);
streamTaskEnablePause(pTask);
}
} else if (level == TASK_LEVEL__SINK) {
stDebug("s-task:%s sink task do nothing to handle scan-history", pTask->id.idStr);

@ -346,7 +345,6 @@ int32_t onNormalTaskReady(SStreamTask* pTask) {
stDebug("s-task:%s level:%d status:%s sched-status:%d", id, pTask->info.taskLevel, p, pTask->status.schedStatus);
}

streamTaskEnablePause(pTask);
return TSDB_CODE_SUCCESS;
}

@ -660,9 +658,6 @@ int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask) {
streamMetaCommit(pMeta);
streamMetaWUnLock(pMeta);

// history data scan in the stream time window finished, now let's enable the pause
streamTaskEnablePause(pTask);

// for source tasks, let's continue execute.
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
streamSchedExec(pTask);

@ -1041,11 +1036,6 @@ void streamTaskResume(SStreamTask* pTask) {
}
}

void streamTaskEnablePause(SStreamTask* pTask) {
stDebug("s-task:%s enable task pause", pTask->id.idStr);
pTask->status.pauseAllowed = 1;
}

static void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) {
int32_t vgId = pMeta->vgId;
void* pIter = NULL;


@ -1086,7 +1086,6 @@ _end:
}

int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]) {
stDebug("try to write to cf parname");
#ifdef USE_ROCKSDB
if (tSimpleHashGetSize(pState->parNameMap) > MAX_TABLE_NAME_NUM) {
if (tSimpleHashGet(pState->parNameMap, &groupId, sizeof(int64_t)) == NULL) {

@ -15,11 +15,11 @@

#include "executor.h"
#include "streamInt.h"
#include "streamsm.h"
#include "tmisce.h"
#include "tstream.h"
#include "ttimer.h"
#include "wal.h"
#include "streamsm.h"

static void streamTaskDestroyUpstreamInfo(SUpstreamInfo* pUpstreamInfo);

@ -309,11 +309,11 @@ void tFreeStreamTask(SStreamTask* pTask) {
stDebug("start to free s-task:0x%x, %p, state:%p", taskId, pTask, pTask->pState);

stDebug("s-task:0x%x task exec summary: create:%" PRId64 ", init:%" PRId64 ", start:%" PRId64
", updateCount:%d latestUpdate:%" PRId64 ", latestCheckPoint:%" PRId64 ", ver:%" PRId64
" nextProcessVer:%" PRId64", checkpointCount:%d",
taskId, pStatis->created, pStatis->init, pStatis->start, pStatis->updateCount, pStatis->latestUpdateTs,
pTask->chkInfo.checkpointId, pTask->chkInfo.checkpointVer, pTask->chkInfo.nextProcessVer,
pStatis->checkpoint);
", updateCount:%d latestUpdate:%" PRId64 ", latestCheckPoint:%" PRId64 ", ver:%" PRId64
" nextProcessVer:%" PRId64 ", checkpointCount:%d",
taskId, pStatis->created, pStatis->init, pStatis->start, pStatis->updateCount, pStatis->latestUpdateTs,
pTask->chkInfo.checkpointId, pTask->chkInfo.checkpointVer, pTask->chkInfo.nextProcessVer,
pStatis->checkpoint);

// remove the ref by timer
while (pTask->status.timerActive > 0) {

@ -358,7 +358,9 @@ void tFreeStreamTask(SStreamTask* pTask) {
walCloseReader(pTask->exec.pWalReader);
}

streamClearChkptReadyMsg(pTask);
pTask->pReadyMsgList = taosArrayDestroy(pTask->pReadyMsgList);

if (pTask->msgInfo.pData != NULL) {
destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
pTask->msgInfo.pData = NULL;

@ -422,7 +424,7 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i
pTask->status.pSM = streamCreateStateMachine(pTask);
if (pTask->status.pSM == NULL) {
stError("s-task:%s failed create state-machine for stream task, initialization failed, code:%s", pTask->id.idStr,
tstrerror(terrno));
tstrerror(terrno));
return terrno;
}

@ -434,7 +436,7 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i
pTask->chkInfo.checkpointVer = ver - 1; // only update when generating checkpoint
pTask->chkInfo.processedVer = ver - 1; // already processed version

pTask->chkInfo.nextProcessVer = ver; // next processed version
pTask->chkInfo.nextProcessVer = ver; // next processed version
pTask->dataRange.range.maxVer = ver;
pTask->dataRange.range.minVer = ver;
pTask->pMsgCb = pMsgCb;

@ -442,7 +444,8 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i

pTask->outputInfo.pTokenBucket = taosMemoryCalloc(1, sizeof(STokenBucket));
if (pTask->outputInfo.pTokenBucket == NULL) {
stError("s-task:%s failed to prepare the tokenBucket, code:%s", pTask->id.idStr, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
stError("s-task:%s failed to prepare the tokenBucket, code:%s", pTask->id.idStr,
tstrerror(TSDB_CODE_OUT_OF_MEMORY));
return TSDB_CODE_OUT_OF_MEMORY;
}

@ -451,7 +454,7 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i
streamTaskInitTokenBucket(pTask->outputInfo.pTokenBucket, 50, 50, tsSinkDataRate, pTask->id.idStr);

TdThreadMutexAttr attr = {0};
int code = taosThreadMutexAttrInit(&attr);
int code = taosThreadMutexAttrInit(&attr);
if (code != 0) {
stError("s-task:%s initElapsed mutex attr failed, code:%s", pTask->id.idStr, tstrerror(code));
return code;

@ -529,8 +532,8 @@ void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpS
SStreamChildEpInfo* pInfo = taosArrayGetP(pTask->upstreamInfo.pList, i);
if (pInfo->nodeId == nodeId) {
epsetAssign(&pInfo->epSet, pEpSet);
stDebug("s-task:0x%x update the upstreamInfo taskId:0x%x(nodeId:%d) newEpset:%s", pTask->id.taskId,
pInfo->taskId, nodeId, buf);
stDebug("s-task:0x%x update the upstreamInfo taskId:0x%x(nodeId:%d) newEpset:%s", pTask->id.taskId, pInfo->taskId,
nodeId, buf);
break;
}
}

@ -569,7 +572,7 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE
if (pVgInfo->vgId == nodeId) {
epsetAssign(&pVgInfo->epSet, pEpSet);
stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpset:%s", pTask->id.taskId,
pVgInfo->taskId, nodeId, buf);
pVgInfo->taskId, nodeId, buf);
break;
}
}

@ -578,7 +581,7 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE
if (pDispatcher->nodeId == nodeId) {
epsetAssign(&pDispatcher->epSet, pEpSet);
stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpSet:%s", pTask->id.taskId,
pDispatcher->taskId, nodeId, buf);
pDispatcher->taskId, nodeId, buf);
}
} else {
// do nothing

@ -586,9 +589,9 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE
}

int32_t streamTaskStop(SStreamTask* pTask) {
int32_t vgId = pTask->pMeta->vgId;
int64_t st = taosGetTimestampMs();
const char* id = pTask->id.idStr;
int32_t vgId = pTask->pMeta->vgId;
int64_t st = taosGetTimestampMs();
const char* id = pTask->id.idStr;

streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_STOP);
qKillTask(pTask->exec.pExecutor, TSDB_CODE_SUCCESS);

@ -635,7 +638,7 @@ int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList) {
p->latestUpdateTs = taosGetTimestampMs();
p->updateCount += 1;
stDebug("s-task:0x%x update task nodeEp epset, updatedNodes:%d, updateCount:%d, prevTs:%" PRId64, pTask->id.taskId,
numOfNodes, p->updateCount, prevTs);
numOfNodes, p->updateCount, prevTs);

for (int32_t i = 0; i < taosArrayGetSize(pNodeList); ++i) {
SNodeUpdateInfo* pInfo = taosArrayGet(pNodeList, i);

@ -706,7 +709,7 @@ int32_t streamTaskClearHTaskAttr(SStreamTask* pTask) {
return TSDB_CODE_SUCCESS;
}

STaskId sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId};
STaskId sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId};
SStreamTask** ppStreamTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &sTaskId, sizeof(sTaskId));

if (ppStreamTask != NULL) {

@ -720,7 +723,7 @@ int32_t streamTaskClearHTaskAttr(SStreamTask* pTask) {
}

int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId) {
SVDropStreamTaskReq *pReq = rpcMallocCont(sizeof(SVDropStreamTaskReq));
SVDropStreamTaskReq* pReq = rpcMallocCont(sizeof(SVDropStreamTaskReq));
if (pReq == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;

@ -269,6 +269,7 @@ int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event) {
pTask->id.idStr, pSM->current.name, GET_EVT_NAME(evt));
taosMsleep(100);
} else {
// no active event trans exists, handle this event directly
pTrans = streamTaskFindTransform(pSM->current.state, event);
if (pTrans == NULL) {
stDebug("s-task:%s failed to handle event:%s", pTask->id.idStr, GET_EVT_NAME(event));

@ -451,60 +452,43 @@ int32_t initStateTransferTable() {
return TSDB_CODE_SUCCESS;
}

//clang-format off
void doInitStateTransferTable(void) {
streamTaskSMTrans = taosArrayInit(8, sizeof(STaskStateTrans));

// initialization event handle
STaskStateTrans trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__READY, TASK_EVENT_INIT,
streamTaskInitStatus, onNormalTaskReady, false, false);
STaskStateTrans trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__READY, TASK_EVENT_INIT, streamTaskInitStatus, onNormalTaskReady, false, false);
taosArrayPush(streamTaskSMTrans, &trans);

trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__SCAN_HISTORY, TASK_EVENT_INIT_SCANHIST,
streamTaskInitStatus, onScanhistoryTaskReady, false, false);
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__SCAN_HISTORY, TASK_EVENT_INIT_SCANHIST, streamTaskInitStatus, onScanhistoryTaskReady, false, false);
taosArrayPush(streamTaskSMTrans, &trans);

trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STREAM_SCAN_HISTORY, TASK_EVENT_INIT_STREAM_SCANHIST,
streamTaskInitStatus, onScanhistoryTaskReady, false, false);
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STREAM_SCAN_HISTORY, TASK_EVENT_INIT_STREAM_SCANHIST, streamTaskInitStatus, onScanhistoryTaskReady, false, false);
taosArrayPush(streamTaskSMTrans, &trans);

// scan-history related event
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL,
NULL, true);
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true);
taosArrayPush(streamTaskSMTrans, &trans);

trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL,
NULL, NULL, true);
trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true);
taosArrayPush(streamTaskSMTrans, &trans);

// halt stream task, from other task status
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL,
streamTaskKeepCurrentVerInWal, NULL, true);
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);
taosArrayPush(streamTaskSMTrans, &trans);

trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL,
streamTaskKeepCurrentVerInWal, NULL, true);
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);
taosArrayPush(streamTaskSMTrans, &trans);

SAttachedEventInfo info = {.status = TASK_STATUS__READY, .event = TASK_EVENT_HALT};
trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL,
streamTaskKeepCurrentVerInWal, &info, true);
taosArrayPush(streamTaskSMTrans, &trans);

trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal,
&info, true);
trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true);
taosArrayPush(streamTaskSMTrans, &trans);

trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL,
streamTaskKeepCurrentVerInWal, NULL, true);
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true);
taosArrayPush(streamTaskSMTrans, &trans);
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);
taosArrayPush(streamTaskSMTrans, &trans);

// checkpoint related event
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL,
streamTaskDoCheckpoint, NULL, true);
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, streamTaskDoCheckpoint, NULL, true);
taosArrayPush(streamTaskSMTrans, &trans);

trans =
createStateTransform(TASK_STATUS__CK, TASK_STATUS__READY, TASK_EVENT_CHECKPOINT_DONE, NULL, NULL, NULL, true);
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__READY, TASK_EVENT_CHECKPOINT_DONE, NULL, NULL, NULL, true);
taosArrayPush(streamTaskSMTrans, &trans);

// pause & resume related event handle

@ -571,4 +555,5 @@ void doInitStateTransferTable(void) {
taosArrayPush(streamTaskSMTrans, &trans);
trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
taosArrayPush(streamTaskSMTrans, &trans);
}
}
//clang-format on
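The reflowed calls above all populate one table of STaskStateTrans entries that streamTaskFindTransform (see the hunk at -269 above) scans on each event. A compact sketch of that table-driven pattern, with hypothetical simplified types rather than the TDengine definitions:

#include <stddef.h>

typedef enum { ST_UNINIT, ST_READY, ST_HALT, ST_CK } State;
typedef enum { EV_INIT, EV_HALT, EV_GEN_CHECKPOINT, EV_CHECKPOINT_DONE } Event;

typedef struct {
  State from, to;
  Event event;
  int (*preAction)(void *task);   // e.g. streamTaskKeepCurrentVerInWal
  int (*postAction)(void *task);  // e.g. onNormalTaskReady
} Trans;

static const Trans kTable[] = {
    {ST_UNINIT, ST_READY, EV_INIT, NULL, NULL},
    {ST_READY, ST_HALT, EV_HALT, NULL, NULL},
    {ST_READY, ST_CK, EV_GEN_CHECKPOINT, NULL, NULL},
    {ST_CK, ST_READY, EV_CHECKPOINT_DONE, NULL, NULL},
};

// Linear scan, mirroring how a taosArray of STaskStateTrans would be searched.
const Trans *findTransform(State cur, Event ev) {
  for (size_t i = 0; i < sizeof(kTable) / sizeof(kTable[0]); ++i) {
    if (kTable[i].from == cur && kTable[i].event == ev) return &kTable[i];
  }
  return NULL;  // caller logs "failed to handle event" and bails out
}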

@ -28,10 +28,12 @@
static int32_t httpRefMgt = 0;
static int64_t httpRef = -1;
static int32_t FAST_FAILURE_LIMIT = 120;
typedef struct SHttpModule {
uv_loop_t* loop;
SAsyncPool* asyncPool;
TdThread thread;
SHashObj* connStatusTable;
} SHttpModule;

typedef struct SHttpMsg {

@ -64,6 +66,8 @@ static void httpHandleReq(SHttpMsg* msg);
static void httpHandleQuit(SHttpMsg* msg);
static int32_t httpSendQuit();

static bool httpFailFastShoudIgnoreMsg(SHashObj* pTable, char* server, int16_t port);
static void httpFailFastMayUpdate(SHashObj* pTable, char* server, int16_t port, int8_t succ);
static int32_t taosSendHttpReportImpl(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen,
EHttpCompFlag flag);

@ -193,11 +197,20 @@ static void httpAsyncCb(uv_async_t* handle) {
SHttpMsg *msg = NULL, *quitMsg = NULL;

queue wq;
QUEUE_INIT(&wq);

static int32_t BATCH_SIZE = 5;
int32_t count = 0;

taosThreadMutexLock(&item->mtx);
QUEUE_MOVE(&item->qmsg, &wq);
while (!QUEUE_IS_EMPTY(&item->qmsg) && count++ < BATCH_SIZE) {
queue* h = QUEUE_HEAD(&item->qmsg);
QUEUE_REMOVE(h);
QUEUE_PUSH(&wq, h);
}
taosThreadMutexUnlock(&item->mtx);

int count = 0;
while (!QUEUE_IS_EMPTY(&wq)) {
queue* h = QUEUE_HEAD(&wq);
QUEUE_REMOVE(h);

@ -262,14 +275,20 @@ static void clientSentCb(uv_write_t* req, int32_t status) {
}
}
static void clientConnCb(uv_connect_t* req, int32_t status) {
SHttpModule* http = taosAcquireRef(httpRefMgt, httpRef);
SHttpClient* cli = req->data;
if (status != 0) {
httpFailFastMayUpdate(http->connStatusTable, cli->addr, cli->port, 0);

tError("http-report failed to conn to server, reason:%s, dst:%s:%d", uv_strerror(status), cli->addr, cli->port);
if (!uv_is_closing((uv_handle_t*)&cli->tcp)) {
uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
}
taosReleaseRef(httpRefMgt, httpRef);
return;
}
httpFailFastMayUpdate(http->connStatusTable, cli->addr, cli->port, 1);

status = uv_write(&cli->req, (uv_stream_t*)&cli->tcp, cli->wbuf, 2, clientSentCb);
if (0 != status) {
tError("http-report failed to send data,reason:%s, dst:%s:%d", uv_strerror(status), cli->addr, cli->port);

@ -277,6 +296,7 @@ static void clientConnCb(uv_connect_t* req, int32_t status) {
uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
}
}
taosReleaseRef(httpRefMgt, httpRef);
}

int32_t httpSendQuit() {

@ -349,16 +369,51 @@ static void httpHandleQuit(SHttpMsg* msg) {
uv_walk(http->loop, httpWalkCb, NULL);
taosReleaseRef(httpRefMgt, httpRef);
}

static bool httpFailFastShoudIgnoreMsg(SHashObj* pTable, char* server, int16_t port) {
char buf[256] = {0};
sprintf(buf, "%s:%d", server, port);

int32_t* failedTime = (int32_t*)taosHashGet(pTable, buf, strlen(buf));
if (failedTime == NULL) {
return false;
}

int32_t now = taosGetTimestampSec();
if (*failedTime > now - FAST_FAILURE_LIMIT) {
tDebug("http-report succ to ignore msg,reason:connection timed out, dst:%s", buf);
return true;
} else {
return false;
}
}
static void httpFailFastMayUpdate(SHashObj* pTable, char* server, int16_t port, int8_t succ) {
char buf[256] = {0};
sprintf(buf, "%s:%d", server, port);

if (succ) {
taosHashRemove(pTable, buf, strlen(buf));
} else {
int32_t st = taosGetTimestampSec();
taosHashPut(pTable, buf, strlen(buf), &st, sizeof(st));
}
return;
}
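The two helpers above implement a per-destination fail-fast gate: a failure is recorded under a "host:port" key with a timestamp, reports to that destination are skipped while the last failure is younger than FAST_FAILURE_LIMIT (120) seconds, and a success clears the entry. A minimal single-slot sketch of the same idea, with illustrative names and no SHashObj:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define FAIL_WINDOW_SEC 120  // mirrors FAST_FAILURE_LIMIT

typedef struct {
  char   key[256];  // "host:port"
  time_t failedAt;  // last failure timestamp, 0 if none recorded
} FailSlot;

static bool shouldSkip(const FailSlot *s, const char *host, int port) {
  char key[256];
  snprintf(key, sizeof(key), "%s:%d", host, port);
  if (strcmp(s->key, key) != 0 || s->failedAt == 0) return false;
  return time(NULL) - s->failedAt < FAIL_WINDOW_SEC;  // still in cool-down
}

static void recordResult(FailSlot *s, const char *host, int port, bool ok) {
  snprintf(s->key, sizeof(s->key), "%s:%d", host, port);
  s->failedAt = ok ? 0 : time(NULL);  // success clears the entry
}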
static void httpHandleReq(SHttpMsg* msg) {
int32_t ignore = false;
SHttpModule* http = taosAcquireRef(httpRefMgt, httpRef);
if (http == NULL) {
goto END;
}

if (httpFailFastShoudIgnoreMsg(http->connStatusTable, msg->server, msg->port)) {
ignore = true;
goto END;
}
struct sockaddr_in dest = {0};
if (taosBuildDstAddr(msg->server, msg->port, &dest) < 0) {
goto END;
}

if (msg->flag == HTTP_GZIP) {
int32_t dstLen = taosCompressHttpRport(msg->cont, msg->len);
if (dstLen > 0) {

@ -399,11 +454,11 @@ static void httpHandleReq(SHttpMsg* msg) {
uv_tcp_init(http->loop, &cli->tcp);

// set up timeout to avoid stuck;
int32_t fd = taosCreateSocketWithTimeout(5);
int32_t fd = taosCreateSocketWithTimeout(5000);
if (fd < 0) {
tError("http-report failed to open socket, dst:%s:%d", cli->addr, cli->port);
taosReleaseRef(httpRefMgt, httpRef);
destroyHttpClient(cli);
taosReleaseRef(httpRefMgt, httpRef);
return;
}
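Two small but real fixes in this hunk: the timeout argument grows from 5 to 5000, which reads like a seconds-to-milliseconds unit fix (assuming taosCreateSocketWithTimeout takes milliseconds), and the error path now destroys the client before releasing the module ref instead of after, so the SHttpModule cannot be torn down while cli is still being cleaned up.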
int ret = uv_tcp_open((uv_tcp_t*)&cli->tcp, fd);

@ -418,13 +473,16 @@ static void httpHandleReq(SHttpMsg* msg) {
if (ret != 0) {
tError("http-report failed to connect to http-server, reason:%s, dst:%s:%d", uv_strerror(ret), cli->addr,
cli->port);
httpFailFastMayUpdate(http->connStatusTable, cli->addr, cli->port, 0);
destroyHttpClient(cli);
}
taosReleaseRef(httpRefMgt, httpRef);
return;

END:
tError("http-report failed to report, reason: %s, addr: %s:%d", terrstr(), msg->server, msg->port);
if (ignore == false) {
tError("http-report failed to report, reason: %s, addr: %s:%d", terrstr(), msg->server, msg->port);
}
httpDestroyMsg(msg);
taosReleaseRef(httpRefMgt, httpRef);
}

@ -441,6 +499,8 @@ static void transHttpEnvInit() {

SHttpModule* http = taosMemoryMalloc(sizeof(SHttpModule));
http->loop = taosMemoryMalloc(sizeof(uv_loop_t));
http->connStatusTable = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);

uv_loop_init(http->loop);

http->asyncPool = transAsyncPoolCreate(http->loop, 1, http, httpAsyncCb);

@ -474,6 +534,8 @@ void transHttpEnvDestroy() {
uv_loop_close(load->loop);
taosMemoryFree(load->loop);

taosHashCleanup(load->connStatusTable);

taosReleaseRef(httpRefMgt, httpRef);
taosRemoveRef(httpRefMgt, httpRef);
}

@ -2,6 +2,7 @@ add_executable(transportTest "")
add_executable(transUT "")
add_executable(svrBench "")
add_executable(cliBench "")
add_executable(httpBench "")

target_sources(transUT
PRIVATE

@ -21,6 +22,10 @@ target_sources(cliBench
PRIVATE
"cliBench.c"
)
target_sources(httpBench
PRIVATE
"http_test.c"
)

target_include_directories(transportTest
PUBLIC

@ -51,11 +56,6 @@ target_include_directories(transUT
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)

target_include_directories(svrBench
PUBLIC
"${TD_SOURCE_DIR}/include/libs/transport"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_include_directories(svrBench
PUBLIC
"${TD_SOURCE_DIR}/include/libs/transport"

@ -75,7 +75,8 @@ target_include_directories(cliBench
"${TD_SOURCE_DIR}/include/libs/transport"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
target_include_directories(cliBench

target_include_directories(httpBench
PUBLIC
"${TD_SOURCE_DIR}/include/libs/transport"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"

@ -89,6 +90,14 @@ target_link_libraries (cliBench
transport
)

target_link_libraries(httpBench
os
util
common
gtest_main
transport
)

add_test(
NAME transUT
COMMAND transUT

@ -0,0 +1,70 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "os.h"
#include "taoserror.h"
#include "tglobal.h"
#include "thttp.h"
#include "transLog.h"
#include "trpc.h"
#include "tutil.h"
#include "tversion.h"

void initLogEnv() {
const char * logDir = "/tmp/trans_cli";
const char * defaultLogFileNamePrefix = "taoslog";
const int32_t maxLogFileNum = 1000000;
tsAsyncLog = 0;
// rpcDebugflag = 143;
strcpy(tsLogDir, (char *)logDir);
taosRemoveDir(tsLogDir);
taosMkDir(tsLogDir);

if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
}
}
typedef struct TThread {
TdThread thread;
int idx;
} TThread;

void *proces(void *arg) {
char *monitor = "172.26.10.94";
while (1) {
int32_t len = 512;
char * msg = taosMemoryCalloc(1, len);
memset(msg, 1, len);
int32_t code = taosSendHttpReport(monitor, "/crash", 6050, msg, 10, HTTP_FLAT);
taosMemoryFree(msg);
taosUsleep(10);
}
}
int main(int argc, char *argv[]) {
initLogEnv();
int32_t numOfThreads = 10;
TThread *thread = taosMemoryCalloc(1, sizeof(TThread) * numOfThreads);

for (int i = 0; i < numOfThreads; i++) {
thread[i].idx = i;
taosThreadCreate(&(thread[i].thread), NULL, proces, (void *)&thread[i]);
}
while (1) {
taosMsleep(5000);
}

taosCloseLog();

return 0;
}
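The new http_test.c is the source behind the httpBench target added in the CMake hunk above: ten threads hammer taosSendHttpReport in a tight loop, presumably a stress reproducer for the http-report crash that the fail-fast and ref-ordering fixes address. Note that each iteration allocates 512 bytes but passes a contLen of 10, and proces never returns, so the tool runs until killed and the trailing taosCloseLog() is unreachable.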

@ -16,9 +16,8 @@ sql create table ts2 using st tags(2,2,2);
sql create table ts3 using st tags(3,2,2);
sql create table ts4 using st tags(4,2,2);
sql create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 watermark 1d into streamt1 as select _wstart, count(*) c1, sum(a) c3 from st interval(10s);
sleep 1000
sleep 2000

sleep 1000
sql pause stream streams1;

sql insert into ts1 values(1648791213001,1,12,3,1.0);


@ -162,7 +162,7 @@ class TDTestCase:
assert('TIMESTAMP' in data_type_list and 'INT' in data_type_list and 'INT UNSIGNED' in data_type_list and 'BIGINT' in data_type_list and 'BIGINT UNSIGNED' in data_type_list and 'FLOAT' in data_type_list and 'DOUBLE' in data_type_list and 'VARCHAR' in data_type_list and 'SMALLINT' in data_type_list and 'SMALLINT UNSIGNED' in data_type_list and 'TINYINT' in data_type_list and 'TINYINT UNSIGNED' in data_type_list and 'BOOL' in data_type_list and 'VARCHAR' in data_type_list and 'NCHAR' in data_type_list and 'GEOMETRY' in data_type_list and 'VARBINARY' in data_type_list)
tdSql.execute("create view v2 as select * from tb where c1 >5 and c7 like '%ab%';")
self.check_view_num(2)
tdSql.error("create view v3 as select * from tb where c1 like '%ab%';", expectErrInfo='Invalid value type')
tdSql.error("create view v3 as select * from tb where c1 like '%ab%';", expectErrInfo='Invalid operation')
tdSql.execute("create view v3 as select first(ts), sum(c1) from tb group by c2 having avg(c4) > 0;")
tdSql.execute("create view v4 as select _wstart,sum(c6) from tb interval(10s);")
tdSql.execute("create view v5 as select * from tb join v2 on tb.ts = v2.ts;")