Merge branch '3.0' into case/TD-28027-3.0

commit 8614d2b7fb
@@ -91,13 +91,15 @@ The list of currently supported Hints is as follows:
 | :-----------: | -------------- | -------------------------- | -----------------------------------|
 | BATCH_SCAN | None | Batch table scan | JOIN statement for stable |
 | NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for stable |
-| SORT_FOR_GROUP| None | Use sort for partition | With normal column in partition by list |
+| SORT_FOR_GROUP| None | Use sort for partition, conflicts with PARTITION_FIRST | With normal column in partition by list |
+| PARTITION_FIRST| None | Use PARTITION before aggregation, conflicts with SORT_FOR_GROUP | With normal column in partition by list |

 For example:

 ```sql
 SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
 SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
+SELECT /*+ PARTITION_FIRST() */ count(*), c1 FROM stable1 PARTITION BY c1;
 ```

 ## Lists
@@ -91,13 +91,15 @@ Hints are a means for users to control the query optimization of an individual statement; when a Hint is not applicable
 | :-----------: | -------------- | -------------------------- | -----------------------------|
 | BATCH_SCAN | None | Batch table scan | JOIN statement for supertables |
 | NO_BATCH_SCAN | None | Sequential table scan | JOIN statement for supertables |
-| SORT_FOR_GROUP| None | Use sort for grouping | When the partition by list contains a normal column |
+| SORT_FOR_GROUP| None | Use sort for grouping, conflicts with PARTITION_FIRST | When the partition by list contains a normal column |
+| PARTITION_FIRST| None | Use PARTITION to compute groups before aggregation, conflicts with SORT_FOR_GROUP | When the partition by list contains a normal column |

 For example:

 ```sql
 SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
 SELECT /*+ SORT_FOR_GROUP() */ count(*), c1 FROM stable1 PARTITION BY c1;
+SELECT /*+ PARTITION_FIRST() */ count(*), c1 FROM stable1 PARTITION BY c1;
 ```

 ## Lists
@@ -74,6 +74,7 @@ extern int64_t tsRpcQueueMemoryAllowed;
 extern int32_t tsElectInterval;
 extern int32_t tsHeartbeatInterval;
 extern int32_t tsHeartbeatTimeout;
+extern int32_t tsSnapReplMaxWaitN;

 // vnode
 extern int64_t tsVndCommitMaxIntervalMs;

@@ -53,9 +53,12 @@ typedef struct {
   void* mgmt;
   void* clientRpc;
   void* serverRpc;
+  void* statusRpc;
+  void* syncRpc;
   PutToQueueFp            putToQueueFp;
   GetQueueSizeFp          qsizeFp;
   SendReqFp               sendReqFp;
+  SendReqFp               sendSyncReqFp;
   SendRspFp               sendRspFp;
   RegisterBrokenLinkArgFp registerBrokenLinkArgFp;
   ReleaseHandleFp         releaseHandleFp;

@@ -67,6 +70,7 @@ void tmsgSetDefault(const SMsgCb* msgcb);
 int32_t tmsgPutToQueue(const SMsgCb* msgcb, EQueueType qtype, SRpcMsg* pMsg);
 int32_t tmsgGetQueueSize(const SMsgCb* msgcb, int32_t vgId, EQueueType qtype);
 int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg);
+int32_t tmsgSendSyncReq(const SEpSet* epSet, SRpcMsg* pMsg);
 void tmsgSendRsp(SRpcMsg* pMsg);
 void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg);
 void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type);

@@ -75,6 +75,7 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
 int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);

 int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval);
+int64_t taosTimeGetIntervalEnd(int64_t ts, const SInterval* pInterval);
 int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision, int32_t order);

 int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision);

@@ -373,6 +373,7 @@
 #define TK_BATCH_SCAN     606
 #define TK_NO_BATCH_SCAN  607
 #define TK_SORT_FOR_GROUP 608
+#define TK_PARTITION_FIRST 609


 #define TK_NK_NIL 65535

@@ -18,6 +18,7 @@

 // message process
 int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart);
+int32_t tqStreamOneTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t streamId, int32_t taskId);
 int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored);
 int32_t tqStreamTaskProcessDispatchReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
 int32_t tqStreamTaskProcessDispatchRsp(SStreamMeta* pMeta, SRpcMsg* pMsg);

@@ -27,11 +28,14 @@ int32_t tqStreamTaskProcessScanHistoryFinishRsp(SStreamMeta* pMeta, SRpcMsg* pMs
 int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
 int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader);
 int32_t tqStreamTaskProcessCheckpointReadyMsg(SStreamMeta* pMeta, SRpcMsg* pMsg);
-int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* msg, int32_t msgLen, bool isLeader, bool restored);
+int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sversion, char* msg, int32_t msgLen,
+                                     bool isLeader, bool restored);
 int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen);
 int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader);
-int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta);
+int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta, int32_t* numOfTasks);
 int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta);
 int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessTaskPauseReq(SStreamMeta* pMeta, char* pMsg);
+int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* pMsg, bool fromVnode);

 #endif // TDENGINE_TQ_COMMON_H

@@ -210,6 +210,7 @@ void* qExtractReaderFromStreamScanner(void* scanner);
 int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner);

 int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo);
+int32_t qResetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo);
 int32_t qStreamSourceScanParamForHistoryScanStep1(qTaskInfo_t tinfo, SVersionRange *pVerRange, STimeWindow* pWindow);
 int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRange *pVerRange, STimeWindow* pWindow);
 int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);

@@ -259,9 +259,13 @@ EFuncDataRequired fmFuncDynDataRequired(int32_t funcId, void* pRes, STimeWindow*
 int32_t fmGetFuncExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet);
 int32_t fmGetScalarFuncExecFuncs(int32_t funcId, SScalarFuncExecFuncs* pFpSet);
 int32_t fmGetUdafExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet);

+#ifdef BUILD_NO_CALL
 int32_t fmSetInvertFunc(int32_t funcId, SFuncExecFuncs* pFpSet);
 int32_t fmSetNormalFunc(int32_t funcId, SFuncExecFuncs* pFpSet);
 bool fmIsInvertible(int32_t funcId);
+#endif

 char* fmGetFuncName(int32_t funcId);

 #ifdef __cplusplus

@@ -126,6 +126,7 @@ typedef enum EHintOption {
   HINT_NO_BATCH_SCAN = 1,
   HINT_BATCH_SCAN,
   HINT_SORT_FOR_GROUP,
+  HINT_PARTITION_FIRST,
 } EHintOption;

 typedef struct SHintNode {
@@ -50,10 +50,13 @@ extern "C" {
     (_t)->hTaskInfo.id.streamId = 0; \
   } while (0)

-#define STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID (-1)
-#define STREAM_EXEC_START_ALL_TASKS_ID     (-2)
-#define STREAM_EXEC_RESTART_ALL_TASKS_ID   (-3)
-#define STREAM_EXEC_STOP_ALL_TASKS_ID      (-4)
+#define STREAM_EXEC_T_EXTRACT_WAL_DATA   (-1)
+#define STREAM_EXEC_T_START_ALL_TASKS    (-2)
+#define STREAM_EXEC_T_START_ONE_TASK     (-3)
+#define STREAM_EXEC_T_RESTART_ALL_TASKS  (-4)
+#define STREAM_EXEC_T_STOP_ALL_TASKS     (-5)
+#define STREAM_EXEC_T_RESUME_TASK        (-6)
+#define STREAM_EXEC_T_UPDATE_TASK_EPSET  (-7)

 typedef struct SStreamTask  SStreamTask;
 typedef struct SStreamQueue SStreamQueue;

@@ -81,14 +84,12 @@ typedef enum ETaskStatus {
   TASK_STATUS__HALT,   // pause, but not be manipulated by user command
   TASK_STATUS__PAUSE,  // pause
   TASK_STATUS__CK,     // stream task is in checkpoint status, no data are allowed to put into inputQ anymore
-  // TASK_STATUS__STREAM_SCAN_HISTORY,
 } ETaskStatus;

 enum {
   TASK_SCHED_STATUS__INACTIVE = 1,
   TASK_SCHED_STATUS__WAITING,
   TASK_SCHED_STATUS__ACTIVE,
-  TASK_SCHED_STATUS__FAILED,
   TASK_SCHED_STATUS__DROPPING,
 };

@@ -322,10 +323,11 @@ typedef struct SStreamStatus {
   int8_t  taskStatus;
   int8_t  downstreamReady;  // downstream tasks are all ready now, if this flag is set
   int8_t  schedStatus;
-  int8_t  keepTaskStatus;
-  bool    appendTranstateBlock;  // has append the transfer state data block already, todo: remove it
+  int32_t schedIdleTime;    // idle time before invoke again
+  int64_t lastExecTs;       // last exec time stamp
+  int8_t  statusBackup;
+  bool    appendTranstateBlock;  // has append the transfer state data block already
   int32_t timerActive;      // timer is active
-  int8_t  allowedAddInTimer;  // allowed to add into timer
   int32_t inScanHistorySentinel;
 } SStreamStatus;

@@ -366,7 +368,8 @@ typedef struct STaskQueue {

 typedef struct STaskSchedInfo {
   int8_t status;
-  void*  pTimer;
+  tmr_h  pDelayTimer;
+  tmr_h  pIdleTimer;
 } STaskSchedInfo;

 typedef struct SSinkRecorder {
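The split of the single `pTimer` into `pDelayTimer` and `pIdleTimer`, together with the new `schedIdleTime` and `lastExecTs` fields in `SStreamStatus`, suggests a scheduler that can delay a task's first launch and back off while the task is idle. A minimal self-contained sketch of that back-off decision; the names `TaskSchedInfo`, `TaskStatus`, and `shouldBackOff` are illustrative, not the repository's actual types:

```c
// Sketch only: the real types live in tstream.h; names here are illustrative.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef void *tmr_h;

typedef struct {
  int8_t status;
  tmr_h  pDelayTimer;  // arms the first launch of the task
  tmr_h  pIdleTimer;   // re-arms while the task has nothing to do
} TaskSchedInfo;

typedef struct {
  int32_t schedIdleTime;  // ms to wait before the next attempt
  int64_t lastExecTs;     // when the task last actually ran
} TaskStatus;

// Hypothetical scheduling decision: run now, or back off for schedIdleTime ms.
static bool shouldBackOff(const TaskStatus *st, int64_t nowMs) {
  return st->schedIdleTime > 0 && (nowMs - st->lastExecTs) < st->schedIdleTime;
}

int main(void) {
  TaskStatus st = {.schedIdleTime = 100, .lastExecTs = 1000};
  printf("back off at t=1050? %d\n", shouldBackOff(&st, 1050));  // 1 (yes)
  printf("back off at t=1200? %d\n", shouldBackOff(&st, 1200));  // 0 (run)
  return 0;
}
```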
@@ -481,6 +484,11 @@ typedef struct STaskUpdateInfo {
   int32_t transId;
 } STaskUpdateInfo;

+typedef struct SScanWalInfo {
+  int32_t scanCounter;
+  tmr_h   scanTimer;
+} SScanWalInfo;
+
 // meta
 typedef struct SStreamMeta {
   char* path;

@@ -498,7 +506,7 @@ typedef struct SStreamMeta {
   bool           sendMsgBeforeClosing;  // send hb to mnode before close all tasks when switch to follower.
   STaskStartInfo startInfo;
   TdThreadRwlock lock;
-  int32_t        walScanCounter;
+  SScanWalInfo   scanInfo;
   void*          streamBackend;
   int64_t        streamBackendRid;
   SHashObj*      pTaskDbUnique;

@@ -541,6 +549,7 @@ typedef struct {
   SMsgHead head;
   int64_t  streamId;
   int32_t  taskId;
+  int32_t  reqType;
 } SStreamTaskRunReq;

 struct SStreamDispatchReq {
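The new `reqType` field in `SStreamTaskRunReq` is the natural carrier for the `STREAM_EXEC_T_*` codes introduced earlier in this diff: a run request can now ask the vnode to start, restart, or stop tasks rather than always executing a single one. A sketch of how a handler might dispatch on it; the handler name and the exact case set are assumptions, not the repository's actual code:

```c
// Illustrative dispatcher, reusing the STREAM_EXEC_T_* codes from this diff.
#include <stdint.h>
#include <stdio.h>

#define STREAM_EXEC_T_EXTRACT_WAL_DATA  (-1)
#define STREAM_EXEC_T_START_ALL_TASKS   (-2)
#define STREAM_EXEC_T_START_ONE_TASK    (-3)
#define STREAM_EXEC_T_RESTART_ALL_TASKS (-4)
#define STREAM_EXEC_T_STOP_ALL_TASKS    (-5)

typedef struct {
  int64_t streamId;
  int32_t taskId;
  int32_t reqType;  // one of the STREAM_EXEC_T_* codes, or a plain "run" request
} RunReq;

// Hypothetical handler: negative codes select a meta-level action,
// anything else is treated as "execute this one task".
static int32_t processRunReq(const RunReq *pReq) {
  switch (pReq->reqType) {
    case STREAM_EXEC_T_START_ALL_TASKS:   puts("start all tasks");   break;
    case STREAM_EXEC_T_START_ONE_TASK:    puts("start one task");    break;
    case STREAM_EXEC_T_RESTART_ALL_TASKS: puts("restart all tasks"); break;
    case STREAM_EXEC_T_STOP_ALL_TASKS:    puts("stop all tasks");    break;
    default:                              puts("execute task");      break;
  }
  return 0;
}

int main(void) {
  RunReq req = {.streamId = 1, .taskId = 2, .reqType = STREAM_EXEC_T_START_ONE_TASK};
  return processRunReq(&req);
}
```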
@@ -553,6 +562,7 @@ struct SStreamDispatchReq {
   int32_t upstreamTaskId;
   int32_t upstreamChildId;
   int32_t upstreamNodeId;
+  int32_t upstreamRelTaskId;
   int32_t blockNum;
   int64_t totalLen;
   SArray* dataLen;  // SArray<int32_t>

@@ -728,6 +738,11 @@ typedef struct SStreamTaskNodeUpdateMsg {
 int32_t tEncodeStreamTaskUpdateMsg(SEncoder* pEncoder, const SStreamTaskNodeUpdateMsg* pMsg);
 int32_t tDecodeStreamTaskUpdateMsg(SDecoder* pDecoder, SStreamTaskNodeUpdateMsg* pMsg);

+typedef struct SStreamTaskState {
+  ETaskStatus state;
+  char*       name;
+} SStreamTaskState;
+
 typedef struct {
   int64_t streamId;
   int32_t downstreamTaskId;

@@ -764,6 +779,7 @@ SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t

 void    streamTaskInputFail(SStreamTask* pTask);
 int32_t streamExecTask(SStreamTask* pTask);
+int32_t streamResumeTask(SStreamTask* pTask);
 int32_t streamSchedExec(SStreamTask* pTask);
 bool    streamTaskShouldStop(const SStreamTask* pStatus);
 bool    streamTaskShouldPause(const SStreamTask* pStatus);

@@ -771,7 +787,7 @@ bool streamTaskIsIdle(const SStreamTask* pTask);
 bool streamTaskReadyToRun(const SStreamTask* pTask, char** pStatus);

 char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
-ETaskStatus streamTaskGetStatus(const SStreamTask* pTask, char** pStr);
+SStreamTaskState* streamTaskGetStatus(const SStreamTask* pTask);
 const char* streamTaskGetStatusStr(ETaskStatus status);
 void streamTaskResetStatus(SStreamTask* pTask);
 void streamTaskSetStatusReady(SStreamTask* pTask);

@@ -810,6 +826,7 @@ int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue);

 // common
 int32_t streamRestoreParam(SStreamTask* pTask);
+int32_t streamResetParamForScanHistory(SStreamTask* pTask);
 void    streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
 void    streamTaskResume(SStreamTask* pTask);
 int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask);

@@ -855,7 +872,7 @@ int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
 void    streamMetaNotifyClose(SStreamMeta* pMeta);
 void    streamMetaStartHb(SStreamMeta* pMeta);
 bool    streamMetaTaskInTimer(SStreamMeta* pMeta);
-int32_t streamMetaUpdateTaskDownstreamStatus(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
+int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
                                       int64_t endTs, bool ready);
 void    streamMetaRLock(SStreamMeta* pMeta);
 void    streamMetaRUnLock(SStreamMeta* pMeta);

@@ -867,6 +884,7 @@ void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool i
 int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
 int32_t streamMetaStartAllTasks(SStreamMeta* pMeta);
 int32_t streamMetaStopAllTasks(SStreamMeta* pMeta);
+int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);

 // checkpoint
 int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);

@@ -260,7 +260,7 @@ typedef enum ELogicConditionType {
 #define TSDB_PASSWORD_LEN      32
 #define TSDB_USET_PASSWORD_LEN 129
 #define TSDB_VERSION_LEN       32
-#define TSDB_LABEL_LEN         12
+#define TSDB_LABEL_LEN         16
 #define TSDB_JOB_STATUS_LEN    32

 #define TSDB_CLUSTER_ID_LEN    40

@@ -23,7 +23,6 @@
 extern "C" {
 #endif

-typedef struct SQWorkerPool SQWorkerPool;
 typedef struct SWWorkerPool SWWorkerPool;

 typedef struct SQueueWorker {

@@ -60,14 +59,14 @@ typedef struct SWWorker {
   SWWorkerPool *pool;
 } SWWorker;

-typedef struct SWWorkerPool {
+struct SWWorkerPool {
   int32_t       max;     // max number of workers
   int32_t       num;
   int32_t       nextId;  // from 0 to max-1, cyclic
   const char   *name;
   SWWorker     *workers;
   TdThreadMutex mutex;
-} SWWorkerPool;
+};

 int32_t tQWorkerInit(SQWorkerPool *pool);
 void    tQWorkerCleanup(SQWorkerPool *pool);
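Replacing `typedef struct SWWorkerPool { ... } SWWorkerPool;` with a plain `struct SWWorkerPool { ... };` works because the typedef name is already introduced by the forward declaration `typedef struct SWWorkerPool SWWorkerPool;` near the top of the header. The same idiom in miniature, as a sketch with generic names rather than the repository's:

```c
#include <stdio.h>

// The forward declaration introduces both the struct tag and the typedef name;
// code that only passes Pool* around needs nothing more than this line.
typedef struct Pool Pool;

static void poolDescribe(const Pool *p);  // fine: Pool is an incomplete type here

// Later (possibly in a .c file), the same tag is completed without re-typedef'ing.
struct Pool {
  int max;  // max number of workers
  int num;
};

static void poolDescribe(const Pool *p) { printf("%d/%d workers\n", p->num, p->max); }

int main(void) {
  Pool p = {.max = 4, .num = 2};
  poolDescribe(&p);
  return 0;
}
```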
@@ -1968,7 +1968,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
   void*   rspObj = NULL;
   int64_t startTime = taosGetTimestampMs();

-  tscInfo("consumer:0x%" PRIx64 " start to poll at %" PRId64 ", timeout:%" PRId64, tmq->consumerId, startTime,
+  tscDebug("consumer:0x%" PRIx64 " start to poll at %" PRId64 ", timeout:%" PRId64, tmq->consumerId, startTime,
           timeout);

   // in no topic status, delayed task also need to be processed

@@ -2015,7 +2015,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
     int64_t currentTime = taosGetTimestampMs();
     int64_t elapsedTime = currentTime - startTime;
     if (elapsedTime > timeout) {
-      tscInfo("consumer:0x%" PRIx64 " (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64,
+      tscDebug("consumer:0x%" PRIx64 " (epoch %d) timeout, no rsp, start time %" PRId64 ", current time %" PRId64,
               tmq->consumerId, tmq->epoch, startTime, currentTime);
       return NULL;
     }

@@ -939,8 +939,8 @@ TEST(clientCase, agg_query_tables) {
   }
   taos_free_result(pRes);

-  int64_t st = 1685959293000;
-  for (int32_t i = 0; i < 10000000; ++i) {
+  int64_t st = 1685959293299;
+  for (int32_t i = 0; i < 5; ++i) {
     char s[256] = {0};

     while (1) {

@@ -954,16 +954,16 @@
       }
     }

-    while (1) {
-      sprintf(s, "insert into t2 values(%ld, %d)", st + i, i);
-      pRes = taos_query(pConn, s);
-      int32_t ret = taos_errno(pRes);
-      taos_free_result(pRes);
-      if (ret == 0) {
-        break;
-      }
-    }
+//    while (1) {
+//      sprintf(s, "insert into t2 values(%ld, %d)", st + i, i);
+//      pRes = taos_query(pConn, s);
+//      int32_t ret = taos_errno(pRes);
+//
+//      taos_free_result(pRes);
+//      if (ret == 0) {
+//        break;
+//      }
+//    }
   }

   // pRes = taos_query(pConn, "show table distributed tup");
@@ -166,7 +166,7 @@ static const SSysDbTableSchema streamTaskSchema[] = {
     {.name = "node_type", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
     {.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
     {.name = "level", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
-    {.name = "status", .bytes = 15 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+    {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
    {.name = "stage", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
    {.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
    // {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},

@@ -58,7 +58,7 @@ int32_t tsNumOfMnodeQueryThreads = 4;
 int32_t tsNumOfMnodeFetchThreads = 1;
 int32_t tsNumOfMnodeReadThreads = 1;
 int32_t tsNumOfVnodeQueryThreads = 4;
-float   tsRatioOfVnodeStreamThreads = 4.0;
+float   tsRatioOfVnodeStreamThreads = 1.0;
 int32_t tsNumOfVnodeFetchThreads = 4;
 int32_t tsNumOfVnodeRsmaThreads = 2;
 int32_t tsNumOfQnodeQueryThreads = 4;

@@ -72,6 +72,7 @@ int32_t tsPQSortMemThreshold = 16;  // M
 int32_t tsElectInterval = 25 * 1000;
 int32_t tsHeartbeatInterval = 1000;
 int32_t tsHeartbeatTimeout = 20 * 1000;
+int32_t tsSnapReplMaxWaitN = 128;

 // mnode
 int64_t tsMndSdbWriteDelta = 200;

@@ -252,7 +253,7 @@ int32_t tsCompactPullupInterval = 10;
 int32_t tsMqRebalanceInterval = 2;
 int32_t tsStreamCheckpointInterval = 60;
 float   tsSinkDataRate = 2.0;
-int32_t tsStreamNodeCheckInterval = 15;
+int32_t tsStreamNodeCheckInterval = 16;
 int32_t tsTtlUnit = 86400;
 int32_t tsTtlPushIntervalSec = 10;
 int32_t tsTrimVDbIntervalSec = 60 * 60;  // interval of trimming db in all vgroups

@@ -621,7 +622,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
       0)
     return -1;

-  if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER,
+  if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 10, CFG_SCOPE_SERVER,
                   CFG_DYN_NONE) != 0)
     return -1;

@@ -673,6 +674,9 @@
   if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER,
                   CFG_DYN_NONE) != 0)
     return -1;
+  if (cfgAddInt32(pCfg, "syncSnapReplMaxWaitN", tsSnapReplMaxWaitN, 16,
+                  (TSDB_SYNC_SNAP_BUFFER_SIZE >> 2), CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0)
+    return -1;

   if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0)
     return -1;

@@ -691,8 +695,7 @@

   if (cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1;
   if (cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
-  if (cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0)
-    return -1;
+  if (cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;

   if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_BOTH, CFG_DYN_NONE) != 0) return -1;
   if (cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, CFG_SCOPE_BOTH, CFG_DYN_ENT_SERVER) != 0) return -1;

@@ -711,8 +714,8 @@
   if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) !=
       0)
     return -1;
-  if (cfgAddInt32(pCfg, "compactPullupInterval", tsCompactPullupInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) !=
-      0)
+  if (cfgAddInt32(pCfg, "compactPullupInterval", tsCompactPullupInterval, 1, 10000, CFG_SCOPE_SERVER,
+                  CFG_DYN_ENT_SERVER) != 0)
     return -1;
   if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) !=
       0)

@@ -1183,6 +1186,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
   tsElectInterval = cfgGetItem(pCfg, "syncElectInterval")->i32;
   tsHeartbeatInterval = cfgGetItem(pCfg, "syncHeartbeatInterval")->i32;
   tsHeartbeatTimeout = cfgGetItem(pCfg, "syncHeartbeatTimeout")->i32;
+  tsSnapReplMaxWaitN = cfgGetItem(pCfg, "syncSnapReplMaxWaitN")->i32;

   tsMndSdbWriteDelta = cfgGetItem(pCfg, "mndSdbWriteDelta")->i64;
   tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64;

@@ -1465,8 +1469,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, char *name) {
       {"stDebugFlag", &stDebugFlag},     {"sndDebugFlag", &sndDebugFlag},
   };

-  static OptionNameAndVar options[] = {
-      {"audit", &tsEnableAudit},
+  static OptionNameAndVar options[] = {{"audit", &tsEnableAudit},
       {"asynclog", &tsAsyncLog},
       {"disableStream", &tsDisableStream},
       {"enableWhiteList", &tsEnableWhiteList},

@@ -1497,8 +1500,7 @@
       {"s3PageCacheSize", &tsS3PageCacheSize},
       {"s3UploadDelaySec", &tsS3UploadDelaySec},
       {"supportVnodes", &tsNumOfSupportVnodes},
-      {"experimental", &tsExperimental}
-  };
+      {"experimental", &tsExperimental}};

   if (taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true) != 0) {
     taosCfgSetOption(options, tListLen(options), pItem, false);

@@ -1695,8 +1697,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
       {"uDebugFlag", &uDebugFlag},     {"simDebugFlag", &simDebugFlag},
   };

-  static OptionNameAndVar options[] = {
-      {"asyncLog", &tsAsyncLog},
+  static OptionNameAndVar options[] = {{"asyncLog", &tsAsyncLog},
       {"assert", &tsAssert},
       {"compressMsgSize", &tsCompressMsgSize},
       {"countAlwaysReturnValue", &tsCountAlwaysReturnValue},

@@ -1720,8 +1721,7 @@
       {"shellActivityTimer", &tsShellActivityTimer},
       {"slowLogThreshold", &tsSlowLogThreshold},
       {"useAdapter", &tsUseAdapter},
-      {"experimental", &tsExperimental}
-  };
+      {"experimental", &tsExperimental}};

   if (taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true) != 0) {
     taosCfgSetOption(options, tListLen(options), pItem, false);
@@ -8334,7 +8334,7 @@ int32_t tDecodeMqMetaRsp(SDecoder *pDecoder, SMqMetaRsp *pRsp) {
   return 0;
 }

-int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
+int32_t tEncodeMqDataRspCommon(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
   if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
   if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
   if (tEncodeI32(pEncoder, pRsp->blockNum) < 0) return -1;

@@ -8356,11 +8356,16 @@
       }
     }
   }
+  return 0;
+}
+
+int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
+  if (tEncodeMqDataRspCommon(pEncoder, pRsp) < 0) return -1;
   if (tEncodeI64(pEncoder, pRsp->sleepTime) < 0) return -1;
   return 0;
 }

-int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
+int32_t tDecodeMqDataRspCommon(SDecoder *pDecoder, SMqDataRsp *pRsp) {
   if (tDecodeSTqOffsetVal(pDecoder, &pRsp->reqOffset) < 0) return -1;
   if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
   if (tDecodeI32(pDecoder, &pRsp->blockNum) < 0) return -1;

@@ -8402,7 +8407,15 @@
       }
     }
   }
-  if (tDecodeI64(pDecoder, &pRsp->sleepTime) < 0) return -1;
+  return 0;
+}
+
+int32_t tDecodeMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
+  if (tDecodeMqDataRspCommon(pDecoder, pRsp) < 0) return -1;
+  if (!tDecodeIsEnd(pDecoder)) {
+    if (tDecodeI64(pDecoder, &pRsp->sleepTime) < 0) return -1;
+  }

   return 0;
 }
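The refactor above splits the wire format into a common prefix (`t{En,De}codeMqDataRspCommon`) plus a type-specific tail, and guards the tail with `tDecodeIsEnd()` so a newer decoder still accepts payloads from an older encoder that never wrote `sleepTime`. The same optional-trailing-field idea in a self-contained sketch; the buffer-based codec here is illustrative, not TDengine's actual SEncoder/SDecoder API:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { const uint8_t *buf; size_t len, pos; } Dec;

static int decodeIsEnd(const Dec *d) { return d->pos >= d->len; }
static int decodeI64(Dec *d, int64_t *v) {
  if (d->len - d->pos < sizeof(*v)) return -1;
  memcpy(v, d->buf + d->pos, sizeof(*v));
  d->pos += sizeof(*v);
  return 0;
}

// Newer decoder: the trailing field is optional, so old payloads still parse.
static int decodeRsp(Dec *d, int64_t *common, int64_t *sleepTime) {
  if (decodeI64(d, common) < 0) return -1;  // common prefix (always present)
  *sleepTime = 0;                           // default when the tail is absent
  if (!decodeIsEnd(d)) {                    // tail only if bytes remain
    if (decodeI64(d, sleepTime) < 0) return -1;
  }
  return 0;
}

int main(void) {
  int64_t payload[2] = {42, 7};
  Dec oldMsg = {.buf = (const uint8_t *)payload, .len = 8};   // old encoder: prefix only
  Dec newMsg = {.buf = (const uint8_t *)payload, .len = 16};  // new encoder: prefix + tail
  int64_t c, s;
  decodeRsp(&oldMsg, &c, &s);
  printf("old: common=%lld sleep=%lld\n", (long long)c, (long long)s);
  decodeRsp(&newMsg, &c, &s);
  printf("new: common=%lld sleep=%lld\n", (long long)c, (long long)s);
  return 0;
}
```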
@@ -8418,7 +8431,7 @@ void tDeleteMqDataRsp(SMqDataRsp *pRsp) {
 }

 int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) {
-  if (tEncodeMqDataRsp(pEncoder, (const SMqDataRsp *)pRsp) < 0) return -1;
+  if (tEncodeMqDataRspCommon(pEncoder, (const SMqDataRsp *)pRsp) < 0) return -1;

   if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1;
   if (pRsp->createTableNum) {

@@ -8432,7 +8445,7 @@ int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) {
 }

 int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) {
-  if (tDecodeMqDataRsp(pDecoder, (SMqDataRsp *)pRsp) < 0) return -1;
+  if (tDecodeMqDataRspCommon(pDecoder, (SMqDataRsp *)pRsp) < 0) return -1;

   if (tDecodeI32(pDecoder, &pRsp->createTableNum) < 0) return -1;
   if (pRsp->createTableNum) {

@@ -8447,6 +8460,7 @@ int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) {
       taosArrayPush(pRsp->createTableReq, &pCreate);
     }
   }
+
   return 0;
 }

@@ -683,6 +683,10 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati
   return getDuration(*duration, *unit, duration, timePrecision);
 }

+static bool taosIsLeapYear(int32_t year) {
+  return (year % 4 == 0 && (year % 100 != 0 || year % 400 == 0));
+}
+
 int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
   if (duration == 0) {
     return t;
|
@ -702,7 +706,13 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
|
||||||
int32_t mon = tm.tm_year * 12 + tm.tm_mon + (int32_t)numOfMonth;
|
int32_t mon = tm.tm_year * 12 + tm.tm_mon + (int32_t)numOfMonth;
|
||||||
tm.tm_year = mon / 12;
|
tm.tm_year = mon / 12;
|
||||||
tm.tm_mon = mon % 12;
|
tm.tm_mon = mon % 12;
|
||||||
|
int daysOfMonth[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
|
||||||
|
if (taosIsLeapYear(1900 + tm.tm_year)) {
|
||||||
|
daysOfMonth[1] = 29;
|
||||||
|
}
|
||||||
|
if (tm.tm_mday > daysOfMonth[tm.tm_mon]) {
|
||||||
|
tm.tm_mday = daysOfMonth[tm.tm_mon];
|
||||||
|
}
|
||||||
return (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision) + fraction);
|
return (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision) + fraction);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
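The block added above fixes month-arithmetic overflow: after shifting `tm_mon`/`tm_year`, a day-of-month that does not exist in the target month (for example, Jan 31 plus one month) is clamped to that month's last day, with February's length taken from `taosIsLeapYear`. A self-contained illustration of the same clamping rule:

```c
#include <stdbool.h>
#include <stdio.h>

static bool isLeapYear(int year) { return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0); }

// Add `months` to (year, mon0, mday), mon0 being 0-based, clamping the day to
// the target month's length -- the same rule the diff adds to taosTimeAdd().
static void addMonths(int *year, int *mon0, int *mday, int months) {
  int daysOfMonth[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
  int m = *year * 12 + *mon0 + months;
  *year = m / 12;
  *mon0 = m % 12;
  if (isLeapYear(*year)) daysOfMonth[1] = 29;
  if (*mday > daysOfMonth[*mon0]) *mday = daysOfMonth[*mon0];
}

int main(void) {
  int y = 2024, m = 0, d = 31;              // 2024-01-31
  addMonths(&y, &m, &d, 1);                 // + 1 month
  printf("%04d-%02d-%02d\n", y, m + 1, d);  // 2024-02-29 (leap year, clamped)
  return 0;
}
```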
@@ -872,23 +882,33 @@ int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval) {
     ASSERT(pInterval->offset >= 0);

     if (pInterval->offset > 0) {
-      start = taosTimeAdd(start, pInterval->offset, pInterval->offsetUnit, precision);
-
       // try to move current window to the left-hand side, due to the offset effect.
-      int64_t end = taosTimeAdd(start, pInterval->interval, pInterval->intervalUnit, precision) - 1;
-      int64_t newe = end;
+      int64_t newe = taosTimeAdd(start, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+      int64_t slidingStart = start;
       while (newe >= ts) {
-        end = newe;
-        newe = taosTimeAdd(newe, -pInterval->sliding, pInterval->slidingUnit, precision);
+        start = slidingStart;
+        slidingStart = taosTimeAdd(slidingStart, -pInterval->sliding, pInterval->slidingUnit, precision);
+        int64_t slidingEnd = taosTimeAdd(slidingStart, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+        newe = taosTimeAdd(slidingEnd, pInterval->offset, pInterval->offsetUnit, precision);
       }
-      start = taosTimeAdd(end, -pInterval->interval, pInterval->intervalUnit, precision) + 1;
+      start = taosTimeAdd(start, pInterval->offset, pInterval->offsetUnit, precision);
     }

     return start;
 }

+// used together with taosTimeTruncate. when offset is greater than zero, slide-start/slide-end is the anchor point
+int64_t taosTimeGetIntervalEnd(int64_t intervalStart, const SInterval* pInterval) {
+  if (pInterval->offset > 0) {
+    int64_t slideStart = taosTimeAdd(intervalStart, -1 * pInterval->offset, pInterval->offsetUnit, pInterval->precision);
+    int64_t slideEnd = taosTimeAdd(slideStart, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+    int64_t result = taosTimeAdd(slideEnd, pInterval->offset, pInterval->offsetUnit, pInterval->precision);
+    return result;
+  } else {
+    int64_t result = taosTimeAdd(intervalStart, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+    return result;
+  }
+}
+
 // internal function, when program is paused in debugger,
 // one can call this function from debugger to print a
 // timestamp as human readable string, for example (gdb):
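With this change the window start returned by `taosTimeTruncate` gets the offset applied last, and the new `taosTimeGetIntervalEnd` undoes the offset, advances one interval, and re-applies the offset, so the two functions stay consistent for offset windows. A small numeric walk-through of that start/end pairing, using plain integer addition in place of the unit-aware `taosTimeAdd` (an intentional simplification):

```c
#include <stdint.h>
#include <stdio.h>

// Simplified, fixed-unit stand-ins for the SInterval fields.
typedef struct { int64_t interval, sliding, offset; } Iv;

// Mirror of the new taosTimeGetIntervalEnd logic with plain addition.
static int64_t intervalEnd(int64_t start, const Iv *iv) {
  if (iv->offset > 0) {
    int64_t slideStart = start - iv->offset;           // back out the offset
    int64_t slideEnd = slideStart + iv->interval - 1;  // one interval later
    return slideEnd + iv->offset;                      // re-apply the offset
  }
  return start + iv->interval - 1;
}

int main(void) {
  Iv iv = {.interval = 10, .sliding = 10, .offset = 3};
  // Suppose truncation anchored ts = 25 to the offset window starting at 23.
  int64_t start = 23;
  printf("window: [%lld, %lld]\n", (long long)start, (long long)intervalEnd(start, &iv));
  // window: [23, 32] -- the end stays interval-1 ticks after the start.
  return 0;
}
```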
@@ -161,7 +161,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
   SEpSet epSet = {0};
   int8_t epUpdated = 0;
   dmGetMnodeEpSet(pMgmt->pData, &epSet);
-  rpcSendRecvWithTimeout(pMgmt->msgCb.clientRpc, &epSet, &rpcMsg, &rpcRsp, &epUpdated, 5000);
+  rpcSendRecvWithTimeout(pMgmt->msgCb.statusRpc, &epSet, &rpcMsg, &rpcRsp, &epUpdated, tsStatusInterval * 5 * 1000);
   if (rpcRsp.code != 0) {
     dmRotateMnodeEpSet(pMgmt->pData);
     char tbuf[512];

@@ -47,6 +47,7 @@ static void *dmStatusThreadFp(void *param) {
 }

 SDmNotifyHandle dmNotifyHdl = {.state = 0};
+
 static void *dmNotifyThreadFp(void *param) {
   SDnodeMgmt *pMgmt = param;
   setThreadName("dnode-notify");

@@ -407,12 +407,8 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
   if (tWWorkerInit(pFPool) != 0) return -1;

   SSingleWorkerCfg mgmtCfg = {
-      .min = 1,
-      .max = 1,
-      .name = "vnode-mgmt",
-      .fp = (FItem)vmProcessMgmtQueue,
-      .param = pMgmt,
-  };
+      .min = 1, .max = 1, .name = "vnode-mgmt", .fp = (FItem)vmProcessMgmtQueue, .param = pMgmt};
   if (tSingleWorkerInit(&pMgmt->mgmtWorker, &mgmtCfg) != 0) return -1;

   dDebug("vnode workers are initialized");

@@ -48,6 +48,8 @@ typedef struct {
 typedef struct {
   void *serverRpc;
   void *clientRpc;
+  void *statusRpc;
+  void *syncRpc;
   SDnodeHandle msgHandles[TDMT_MAX];
 } SDnodeTrans;

@@ -136,8 +138,10 @@ int32_t dmInitServer(SDnode *pDnode);
 void    dmCleanupServer(SDnode *pDnode);
 int32_t dmInitClient(SDnode *pDnode);
 int32_t dmInitStatusClient(SDnode *pDnode);
+int32_t dmInitSyncClient(SDnode *pDnode);
 void    dmCleanupClient(SDnode *pDnode);
 void    dmCleanupStatusClient(SDnode *pDnode);
+void    dmCleanupSyncClient(SDnode *pDnode);
 SMsgCb  dmGetMsgcb(SDnode *pDnode);
 #ifdef TD_MODULE_OPTIMIZE
 int32_t dmInitMsgHandle(SDnode *pDnode, SMgmtWrapper *wrappers);

@@ -28,9 +28,6 @@
   } \
 } while (0)

-extern int32_t streamTimerInit();
-extern void    streamTimerCleanUp();
-
 static SDnode globalDnode = {0};

 SDnode *dmInstance() { return &globalDnode; }

@@ -169,7 +166,6 @@ int32_t dmInit() {
 #if defined(USE_S3)
   if (s3Begin() != 0) return -1;
 #endif
-  if (streamTimerInit() != 0) return -1;

   dInfo("dnode env is initialized");
   return 0;

@@ -196,10 +192,10 @@ void dmCleanup() {
   udfStopUdfd();
   taosStopCacheRefreshWorker();
   dmDiskClose();

 #if defined(USE_S3)
   s3End();
 #endif
-  streamTimerCleanUp();
+

   dInfo("dnode env is cleaned up");

@@ -90,9 +90,13 @@ int32_t dmInitDnode(SDnode *pDnode) {
     goto _OVER;
   }
 #endif

   indexInit(tsNumOfCommitThreads);
   streamMetaInit();

+  dmInitStatusClient(pDnode);
+  dmInitSyncClient(pDnode);
+
   dmReportStartup("dnode-transport", "initialized");
   dDebug("dnode is created, ptr:%p", pDnode);
   code = 0;

@@ -108,11 +112,15 @@ _OVER:
 }

 void dmCleanupDnode(SDnode *pDnode) {
-  if (pDnode == NULL) return;
+  if (pDnode == NULL) {
+    return;
+  }

   dmCleanupClient(pDnode);
   dmCleanupStatusClient(pDnode);
+  dmCleanupSyncClient(pDnode);
   dmCleanupServer(pDnode);

   dmClearVars(pDnode);
   rpcCleanup();
   streamMetaCleanup();
@@ -322,6 +322,23 @@ static inline int32_t dmSendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
     return 0;
   }
 }

+static inline int32_t dmSendSyncReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
+  SDnode *pDnode = dmInstance();
+  if (pDnode->status != DND_STAT_RUNNING && pMsg->msgType < TDMT_SYNC_MSG) {
+    rpcFreeCont(pMsg->pCont);
+    pMsg->pCont = NULL;
+    if (pDnode->status == DND_STAT_INIT) {
+      terrno = TSDB_CODE_APP_IS_STARTING;
+    } else {
+      terrno = TSDB_CODE_APP_IS_STOPPING;
+    }
+    dError("failed to send rpc msg:%s since %s, handle:%p", TMSG_INFO(pMsg->msgType), terrstr(), pMsg->info.handle);
+    return -1;
+  } else {
+    rpcSendRequest(pDnode->trans.syncRpc, pEpSet, pMsg, NULL);
+    return 0;
+  }
+}
+
 static inline void dmRegisterBrokenLinkArg(SRpcMsg *pMsg) { rpcRegisterBrokenLinkArg(pMsg); }
@@ -346,8 +363,8 @@ int32_t dmInitClient(SDnode *pDnode) {
   SDnodeTrans *pTrans = &pDnode->trans;

   SRpcInit rpcInit = {0};
-  rpcInit.label = "DND-C";
-  rpcInit.numOfThreads = tsNumOfRpcThreads;
+  rpcInit.label = "DNODE-CLI";
+  rpcInit.numOfThreads = tsNumOfRpcThreads / 2;
   rpcInit.cfp = (RpcCfp)dmProcessRpcMsg;
   rpcInit.sessions = 1024;
   rpcInit.connType = TAOS_CONN_CLIENT;

@@ -366,7 +383,7 @@
   rpcInit.failFastThreshold = 3;  // failed threshold
   rpcInit.ffp = dmFailFastFp;

-  int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3);
+  int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3) / 2;
   connLimitNum = TMAX(connLimitNum, 10);
   connLimitNum = TMIN(connLimitNum, 500);

@@ -390,7 +407,7 @@ int32_t dmInitStatusClient(SDnode *pDnode) {
   SDnodeTrans *pTrans = &pDnode->trans;

   SRpcInit rpcInit = {0};
-  rpcInit.label = "DND-STATUS";
+  rpcInit.label = "DNODE-STA-CLI";
   rpcInit.numOfThreads = 1;
   rpcInit.cfp = (RpcCfp)dmProcessRpcMsg;
   rpcInit.sessions = 1024;
@@ -421,16 +438,61 @@
   rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
   taosVersionStrToInt(version, &(rpcInit.compatibilityVer));

-  // pTrans->statusClientRpc = rpcOpen(&rpcInit);
-  // if (pTrans->statusClientRpc == NULL) {
-  //   dError("failed to init dnode rpc status client");
-  //   return -1;
-  // }
+  pTrans->statusRpc = rpcOpen(&rpcInit);
+  if (pTrans->statusRpc == NULL) {
+    dError("failed to init dnode rpc status client");
+    return -1;
+  }

   dDebug("dnode rpc status client is initialized");
   return 0;
 }

+int32_t dmInitSyncClient(SDnode *pDnode) {
+  SDnodeTrans *pTrans = &pDnode->trans;
+
+  SRpcInit rpcInit = {0};
+  rpcInit.label = "DNODE-SYNC-CLI";
+  rpcInit.numOfThreads = tsNumOfRpcThreads / 2;
+  rpcInit.cfp = (RpcCfp)dmProcessRpcMsg;
+  rpcInit.sessions = 1024;
+  rpcInit.connType = TAOS_CONN_CLIENT;
+  rpcInit.user = TSDB_DEFAULT_USER;
+  rpcInit.idleTime = tsShellActivityTimer * 1000;
+  rpcInit.parent = pDnode;
+  rpcInit.rfp = rpcRfp;
+  rpcInit.compressSize = tsCompressMsgSize;
+
+  rpcInit.retryMinInterval = tsRedirectPeriod;
+  rpcInit.retryStepFactor = tsRedirectFactor;
+  rpcInit.retryMaxInterval = tsRedirectMaxPeriod;
+  rpcInit.retryMaxTimeout = tsMaxRetryWaitTime;
+
+  rpcInit.failFastInterval = 5000;  // interval threshold(ms)
+  rpcInit.failFastThreshold = 3;    // failed threshold
+  rpcInit.ffp = dmFailFastFp;
+
+  int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3) / 2;
+  connLimitNum = TMAX(connLimitNum, 10);
+  connLimitNum = TMIN(connLimitNum, 500);
+
+  rpcInit.connLimitNum = connLimitNum;
+  rpcInit.connLimitLock = 1;
+  rpcInit.supportBatch = 1;
+  rpcInit.batchSize = 8 * 1024;
+  rpcInit.timeToGetConn = tsTimeToGetAvailableConn;
+  taosVersionStrToInt(version, &(rpcInit.compatibilityVer));
+
+  pTrans->syncRpc = rpcOpen(&rpcInit);
+  if (pTrans->syncRpc == NULL) {
+    dError("failed to init dnode rpc sync client");
+    return -1;
+  }
+
+  dDebug("dnode rpc sync client is initialized");
+  return 0;
+}
+
 void dmCleanupClient(SDnode *pDnode) {
   SDnodeTrans *pTrans = &pDnode->trans;
   if (pTrans->clientRpc) {
|
||||||
}
|
}
|
||||||
void dmCleanupStatusClient(SDnode *pDnode) {
|
void dmCleanupStatusClient(SDnode *pDnode) {
|
||||||
SDnodeTrans *pTrans = &pDnode->trans;
|
SDnodeTrans *pTrans = &pDnode->trans;
|
||||||
// if (pTrans->statusClientRpc) {
|
if (pTrans->statusRpc) {
|
||||||
// rpcClose(pTrans->statusClientRpc);
|
rpcClose(pTrans->statusRpc);
|
||||||
// pTrans->statusClientRpc = NULL;
|
pTrans->statusRpc = NULL;
|
||||||
// dDebug("dnode rpc status client is closed");
|
dDebug("dnode rpc status client is closed");
|
||||||
// }
|
}
|
||||||
|
}
|
||||||
|
void dmCleanupSyncClient(SDnode *pDnode) {
|
||||||
|
SDnodeTrans *pTrans = &pDnode->trans;
|
||||||
|
if (pTrans->syncRpc) {
|
||||||
|
rpcClose(pTrans->syncRpc);
|
||||||
|
pTrans->syncRpc = NULL;
|
||||||
|
dDebug("dnode rpc sync client is closed");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t dmInitServer(SDnode *pDnode) {
|
int32_t dmInitServer(SDnode *pDnode) {
|
||||||
|
@ -486,7 +556,10 @@ SMsgCb dmGetMsgcb(SDnode *pDnode) {
|
||||||
SMsgCb msgCb = {
|
SMsgCb msgCb = {
|
||||||
.clientRpc = pDnode->trans.clientRpc,
|
.clientRpc = pDnode->trans.clientRpc,
|
||||||
.serverRpc = pDnode->trans.serverRpc,
|
.serverRpc = pDnode->trans.serverRpc,
|
||||||
|
.statusRpc = pDnode->trans.statusRpc,
|
||||||
|
.syncRpc = pDnode->trans.syncRpc,
|
||||||
.sendReqFp = dmSendReq,
|
.sendReqFp = dmSendReq,
|
||||||
|
.sendSyncReqFp = dmSendSyncReq,
|
||||||
.sendRspFp = dmSendRsp,
|
.sendRspFp = dmSendRsp,
|
||||||
.registerBrokenLinkArgFp = dmRegisterBrokenLinkArg,
|
.registerBrokenLinkArgFp = dmRegisterBrokenLinkArg,
|
||||||
.releaseHandleFp = dmReleaseHandle,
|
.releaseHandleFp = dmReleaseHandle,
|
||||||
|
|
|
@@ -71,7 +71,8 @@ int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream

 int32_t mndStreamRegisterTrans(STrans* pTrans, const char* pTransName, int64_t streamUid);
 int32_t mndAddtoCheckpointWaitingList(SStreamObj *pStream, int64_t checkpointId);
-bool streamTransConflictOtherTrans(SMnode *pMnode, int64_t streamUid, const char *pTransName, bool lock);
+bool mndStreamTransConflictCheck(SMnode *pMnode, int64_t streamUid, const char *pTransName, bool lock);
+int32_t mndStreamGetRelTrans(SMnode *pMnode, int64_t streamUid);

 // for sma
 // TODO refactor

@@ -32,6 +32,10 @@ int32_t sendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
   terrno = TSDB_CODE_INVALID_PTR;
   return -1;
 }
+int32_t sendSyncReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
+  terrno = TSDB_CODE_INVALID_PTR;
+  return -1;
+}

 char *i642str(int64_t val) {
   static char str[24] = {0};
@@ -568,6 +572,7 @@ void mndDumpSdb() {
   SMsgCb msgCb = {0};
   msgCb.reportStartupFp = reportStartup;
   msgCb.sendReqFp = sendReq;
+  msgCb.sendSyncReqFp = sendSyncReq;
   msgCb.sendRspFp = sendRsp;
   msgCb.mgmt = (SMgmtWrapper *)(&msgCb);  // hack
   tmsgSetDefault(&msgCb);

@@ -145,7 +145,7 @@ static void mndCalMqRebalance(SMnode *pMnode) {
   void *pReq = mndBuildTimerMsg(&contLen);
   if (pReq != NULL) {
     SRpcMsg rpcMsg = {.msgType = TDMT_MND_TMQ_TIMER, .pCont = pReq, .contLen = contLen};
-    tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg);
+    tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
   }
 }

@@ -299,14 +299,14 @@ static bool mnodeIsNotLeader(SMnode *pMnode) {
 }

 static int32_t minCronTime() {
-  int64_t min = INT64_MAX;
+  int32_t min = INT32_MAX;
   min = TMIN(min, tsTtlPushIntervalSec);
   min = TMIN(min, tsTrimVDbIntervalSec);
   min = TMIN(min, tsTransPullupInterval);
   min = TMIN(min, tsCompactPullupInterval);
   min = TMIN(min, tsMqRebalanceInterval);
   min = TMIN(min, tsStreamCheckpointInterval);
-  min = TMIN(min, 5);  // checkpointRemain
+  min = TMIN(min, 6);  // checkpointRemain
   min = TMIN(min, tsStreamNodeCheckInterval);

   int64_t telemInt = TMIN(60, (tsTelemInterval - 1));
@@ -386,7 +386,8 @@ static void *mndThreadFp(void *param) {
     int64_t minCron = minCronTime();
     if (sec % minCron == 0 && mnodeIsNotLeader(pMnode)) {
       // not leader, do nothing
-      mTrace("timer not process since mnode is not leader, reason: %s", tstrerror(terrno)) terrno = 0;
+      mTrace("timer not process since mnode is not leader, reason: %s", tstrerror(terrno));
+      terrno = 0;
       continue;
     }
     mndDoTimerPullupTask(pMnode, sec);

@@ -80,7 +80,9 @@ static int32_t createStreamUpdateTrans(SStreamObj *pStream, SVgroupChangeInfo *p
 static void removeStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode);
 static void saveStreamTasksInfo(SStreamObj *pStream, SStreamExecInfo *pExecNode);
 static int32_t removeExpirednodeEntryAndTask(SArray *pNodeSnapshot);
-static int32_t killActiveCheckpointTrans(SMnode *pMnode, const char *pDbName, size_t len);
+static int32_t doKillCheckpointTrans(SMnode *pMnode, const char *pDbName, size_t len);
+static void killTransImpl(SMnode *pMnode, int32_t transId, const char *pDbName);

 static int32_t setNodeEpsetExpiredFlag(const SArray *pNodeList);
 static void freeCheckpointCandEntry(void *);

@@ -93,9 +95,6 @@ static int32_t mndStreamSeqActionInsert(SSdb *pSdb, SStreamSeq *pStream);
 static int32_t mndStreamSeqActionDelete(SSdb *pSdb, SStreamSeq *pStream);
 static int32_t mndStreamSeqActionUpdate(SSdb *pSdb, SStreamSeq *pOldStream, SStreamSeq *pNewStream);

-static SSdbRaw *mndStreamActionEncode(SStreamObj *pStream);
-static SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw);
-
 int32_t mndInitStream(SMnode *pMnode) {
   SSdbTable table = {
       .sdbType = SDB_STREAM,
@@ -1047,7 +1046,7 @@ static int32_t mndProcessStreamCheckpointTrans(SMnode *pMnode, SStreamObj *pStre
     return -1;
   }

-  bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_CHECKPOINT_NAME, true);
+  bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_CHECKPOINT_NAME, true);
   if (conflict) {
     mndAddtoCheckpointWaitingList(pStream, checkpointId);
     mWarn("checkpoint conflict with other trans in %s, ignore the checkpoint for stream:%s %" PRIx64, pStream->sourceDb,
@@ -1344,6 +1343,7 @@ static int32_t mndProcessStreamCheckpointInCandid(SRpcMsg *pReq) {
     if (ps == NULL) {
       continue;
     }

     mDebug("start to launch checkpoint for stream:%s %" PRIx64 " in candidate list", pEntry->pName, pEntry->streamId);

     code = mndProcessStreamCheckpointTrans(pMnode, ps, pEntry->checkpointId);
@@ -1399,7 +1399,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
   }

   // check if it is conflict with other trans in both sourceDb and targetDb.
-  bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_DROP_NAME, true);
+  bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_DROP_NAME, true);
   if (conflict) {
     sdbRelease(pMnode->pSdb, pStream);
     tFreeMDropStreamReq(&dropReq);
@@ -1451,6 +1451,13 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
     return -1;
   }

+  // kill the related checkpoint trans
+  int32_t transId = mndStreamGetRelTrans(pMnode, pStream->uid);
+  if (transId != 0) {
+    mDebug("drop active related transId:%d due to stream:%s dropped", transId, pStream->name);
+    killTransImpl(pMnode, transId, pStream->sourceDb);
+  }
+
   removeStreamTasksInBuf(pStream, &execInfo);

   SName name = {0};
@@ -1491,6 +1498,17 @@ int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
       return -1;
     }
 #endif

+    // kill the related checkpoint trans
+    int32_t transId = mndStreamGetRelTrans(pMnode, pStream->uid);
+    if (transId != 0) {
+      mDebug("drop active related transId:%d due to stream:%s dropped", transId, pStream->name);
+      killTransImpl(pMnode, transId, pStream->sourceDb);
+    }
+
+    // drop the stream obj in execInfo
+    removeStreamTasksInBuf(pStream, &execInfo);
+
     if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) {
       sdbRelease(pSdb, pStream);
       sdbCancelFetch(pSdb, pIter);
@@ -1623,10 +1641,18 @@ static void mndCancelGetNextStream(SMnode *pMnode, void *pIter) {
   sdbCancelFetch(pSdb, pIter);
 }

-static void setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDataBlock *pBlock, int32_t numOfRows) {
+static int32_t setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDataBlock *pBlock, int32_t numOfRows) {
   SColumnInfoData *pColInfo;
   int32_t cols = 0;

+  STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId};
+
+  STaskStatusEntry *pe = taosHashGet(execInfo.pTaskMap, &id, sizeof(id));
+  if (pe == NULL) {
+    mError("task:0x%" PRIx64 " not exists in vnode, no valid status/stage info", id.taskId);
+    return -1;
+  }
+
   // stream name
   char streamName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
   STR_WITH_MAXSIZE_TO_VARSTR(streamName, mndGetDbStr(pStream->name), sizeof(streamName));
@@ -1678,13 +1704,6 @@ static void setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDat

   // status
   char status[20 + VARSTR_HEADER_SIZE] = {0};
-  STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId};
-
-  STaskStatusEntry *pe = taosHashGet(execInfo.pTaskMap, &id, sizeof(id));
-  if (pe == NULL) {
-    mError("task:0x%" PRIx64 " not exists in vnode, no valid status/stage info", id.taskId);
-    return;
-  }

   const char *pStatus = streamTaskGetStatusStr(pe->status);
   STR_TO_VARSTR(status, pStatus);
@@ -1727,6 +1746,8 @@ static void setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDat

   pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
   colDataSetVal(pColInfo, numOfRows, (const char *)vbuf, false);
+
+  return TSDB_CODE_SUCCESS;
 }

 static int32_t getNumOfTasks(SArray *pTaskList) {
@@ -1768,10 +1789,12 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
     int32_t numOfLevels = taosArrayGetSize(pLevel);
     for (int32_t j = 0; j < numOfLevels; j++) {
       SStreamTask *pTask = taosArrayGetP(pLevel, j);
-      setTaskAttrInResBlock(pStream, pTask, pBlock, numOfRows);
-      numOfRows++;
+      int32_t code = setTaskAttrInResBlock(pStream, pTask, pBlock, numOfRows);
+      if (code == TSDB_CODE_SUCCESS) {
+        numOfRows++;
+      }
     }
   }

   // unlock
   taosRUnLockLatch(&pStream->lock);
@@ -1838,7 +1861,7 @@ int32_t mndPauseAllStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStre
     }

     if (atomic_load_8(&pTask->status.taskStatus) != TASK_STATUS__PAUSE) {
-      atomic_store_8(&pTask->status.keepTaskStatus, pTask->status.taskStatus);
+      atomic_store_8(&pTask->status.statusBackup, pTask->status.taskStatus);
       atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__PAUSE);
     }
   }
@@ -1894,7 +1917,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
   }

   // check if it is conflict with other trans in both sourceDb and targetDb.
-  bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_PAUSE_NAME, true);
+  bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_PAUSE_NAME, true);
   if (conflict) {
     sdbRelease(pMnode->pSdb, pStream);
     return -1;
@@ -1996,7 +2019,7 @@ int32_t mndResumeAllStreamTasks(STrans *pTrans, SMnode *pMnode, SStreamObj *pStr
     }

     if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__PAUSE) {
-      atomic_store_8(&pTask->status.taskStatus, pTask->status.keepTaskStatus);
+      atomic_store_8(&pTask->status.taskStatus, pTask->status.statusBackup);
     }
   }
 }
@@ -2037,7 +2060,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
   }

   // check if it is conflict with other trans in both sourceDb and targetDb.
-  bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_RESUME_NAME, true);
+  bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_RESUME_NAME, true);
   if (conflict) {
     sdbRelease(pMnode->pSdb, pStream);
     return -1;
@@ -2343,7 +2366,7 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange
       break;
     }

-    bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_TASK_UPDATE_NAME, false);
+    bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_TASK_UPDATE_NAME, false);
     sdbRelease(pSdb, pStream);

     if (conflict) {
@@ -2567,7 +2590,7 @@ static void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInf

     size_t len = 0;
     void * pKey = taosHashGetKey(pDb, &len);
-    killActiveCheckpointTrans(pMnode, pKey, len);
+    doKillCheckpointTrans(pMnode, pKey, len);
   }
 }

@@ -2812,7 +2835,16 @@ int32_t createStreamResetStatusTrans(SMnode *pMnode, SStreamObj *pStream) {
   return TSDB_CODE_ACTION_IN_PROGRESS;
 }

-int32_t killActiveCheckpointTrans(SMnode *pMnode, const char *pDBName, size_t len) {
+void killTransImpl(SMnode* pMnode, int32_t transId, const char* pDbName) {
+  STrans *pTrans = mndAcquireTrans(pMnode, transId);
+  if (pTrans != NULL) {
+    mInfo("kill active transId:%d in Db:%s", transId, pDbName);
+    mndKillTrans(pMnode, pTrans);
+    mndReleaseTrans(pMnode, pTrans);
+  }
+}
+
+int32_t doKillCheckpointTrans(SMnode *pMnode, const char *pDBName, size_t len) {
   // data in the hash table will be removed automatically, no need to remove it here.
   SStreamTransInfo *pTransInfo = taosHashGet(execInfo.transMgmt.pDBTrans, pDBName, len);
   if (pTransInfo == NULL) {
@@ -2825,16 +2857,10 @@ int32_t killActiveCheckpointTrans(SMnode *pMnode, const char *pDBName, size_t le
     return TSDB_CODE_SUCCESS;
   }

-  STrans *pTrans = mndAcquireTrans(pMnode, pTransInfo->transId);
-  if (pTrans != NULL) {
-    char* pDupDBName = strndup(pDBName, len);
-    mInfo("kill checkpoint transId:%d in Db:%s", pTransInfo->transId, pDupDBName);
-    taosMemoryFree(pDupDBName);
-
-    mndKillTrans(pMnode, pTrans);
-    mndReleaseTrans(pMnode, pTrans);
-  }
+  char* pDupDBName = strndup(pDBName, len);
+  killTransImpl(pMnode, pTransInfo->transId, pDupDBName);
+  taosMemoryFree(pDupDBName);

   return TSDB_CODE_SUCCESS;
 }
@@ -2855,7 +2881,7 @@ static int32_t mndResetStatusFromCheckpoint(SMnode *pMnode, int64_t streamId, in
     code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
     mError("failed to acquire the streamObj:0x%" PRIx64 " to reset checkpoint, may have been dropped", pStream->uid);
   } else {
-    bool conflict = streamTransConflictOtherTrans(pMnode, pStream->uid, MND_STREAM_TASK_RESET_NAME, false);
+    bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_TASK_RESET_NAME, false);
     if (conflict) {
       mError("stream:%s other trans exists in DB:%s, dstTable:%s failed to start reset-status trans", pStream->name,
              pStream->sourceDb, pStream->targetSTbName);
@@ -3090,19 +3116,6 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {

     if (p->status != TASK_STATUS__READY) {
       mDebug("received s-task:0x%" PRIx64 " not in ready status:%s", p->id.taskId, streamTaskGetStatusStr(p->status));
-
-      // if (p->status == TASK_STATUS__STREAM_SCAN_HISTORY) {
-      //   bool drop = needDropRelatedFillhistoryTask(pTaskEntry, &execInfo);
-      //   if (drop) {
-      //     SStreamObj *pStreamObj = mndGetStreamObj(pMnode, pTaskEntry->id.streamId);
-      //     if (pStreamObj == NULL) {
-      //       mError("failed to acquire the streamObj:0x%" PRIx64 " it may have been dropped", pStreamObj->uid);
-      //     } else {
-      //       mndDropRelatedFillhistoryTask(pMnode, pTaskEntry, pStreamObj);
-      //       mndReleaseStream(pMnode, pStreamObj);
-      //     }
-      //   }
-      // }
     }
   }

@@ -65,7 +65,7 @@ int32_t clearFinishedTrans(SMnode* pMnode) {
   return 0;
 }

-bool streamTransConflictOtherTrans(SMnode* pMnode, int64_t streamUid, const char* pTransName, bool lock) {
+bool mndStreamTransConflictCheck(SMnode* pMnode, int64_t streamUid, const char* pTransName, bool lock) {
   if (lock) {
     taosThreadMutexLock(&execInfo.lock);
   }
@@ -92,6 +92,7 @@ bool streamTransConflictOtherTrans(SMnode* pMnode, int64_t streamUid, const char
     if (strcmp(pTransName, MND_STREAM_DROP_NAME) != 0) {
       mWarn("conflict with other transId:%d streamUid:0x%" PRIx64 ", trans:%s", tInfo.transId, tInfo.streamUid,
             tInfo.name);
+      terrno = TSDB_CODE_MND_TRANS_CONFLICT;
       return true;
     } else {
       mDebug("not conflict with checkpoint trans, name:%s, continue create trans", pTransName);
@@ -100,6 +101,7 @@ bool streamTransConflictOtherTrans(SMnode* pMnode, int64_t streamUid, const char
         (strcmp(tInfo.name, MND_STREAM_TASK_RESET_NAME) == 0)) {
       mWarn("conflict with other transId:%d streamUid:0x%" PRIx64 ", trans:%s", tInfo.transId, tInfo.streamUid,
             tInfo.name);
+      terrno = TSDB_CODE_MND_TRANS_CONFLICT;
       return true;
     }
   } else {
@@ -113,6 +115,30 @@ bool streamTransConflictOtherTrans(SMnode* pMnode, int64_t streamUid, const char
   return false;
 }

+int32_t mndStreamGetRelTrans(SMnode* pMnode, int64_t streamUid) {
+  taosThreadMutexLock(&execInfo.lock);
+  int32_t num = taosHashGetSize(execInfo.transMgmt.pDBTrans);
+  if (num <= 0) {
+    taosThreadMutexUnlock(&execInfo.lock);
+    return 0;
+  }
+
+  clearFinishedTrans(pMnode);
+  SStreamTransInfo* pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, &streamUid, sizeof(streamUid));
+  if (pEntry != NULL) {
+    SStreamTransInfo tInfo = *pEntry;
+    taosThreadMutexUnlock(&execInfo.lock);
+
+    if (strcmp(tInfo.name, MND_STREAM_CHECKPOINT_NAME) == 0 || strcmp(tInfo.name, MND_STREAM_TASK_UPDATE_NAME) == 0) {
+      return tInfo.transId;
+    }
+  } else {
+    taosThreadMutexUnlock(&execInfo.lock);
+  }
+
+  return 0;
+}
+
 int32_t mndAddtoCheckpointWaitingList(SStreamObj* pStream, int64_t checkpointId) {
   SCheckpointCandEntry* pEntry = taosHashGet(execInfo.transMgmt.pWaitingList, &pStream->uid, sizeof(pStream->uid));
   if (pEntry == NULL) {

@@ -747,7 +747,7 @@ static int32_t mndCheckConsumer(SRpcMsg *pMsg, SHashObj* rebSubHash) {
     int32_t hbStatus = atomic_add_fetch_32(&pConsumer->hbStatus, 1);
     int32_t status = atomic_load_32(&pConsumer->status);

-    mInfo("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", createTime:%" PRId64 ", hbstatus:%d",
+    mDebug("check for consumer:0x%" PRIx64 " status:%d(%s), sub-time:%" PRId64 ", createTime:%" PRId64 ", hbstatus:%d",
           pConsumer->consumerId, status, mndConsumerStatusName(status), pConsumer->subscribeTime, pConsumer->createTime,
           hbStatus);

@@ -61,6 +61,7 @@ class MndTestTrans2 : public ::testing::Test {
     static SMsgCb msgCb = {0};
     msgCb.reportStartupFp = reportStartup;
     msgCb.sendReqFp = sendReq;
+    msgCb.sendSyncReqFp = sendSyncReq;
    msgCb.sendRspFp = sendRsp;
    msgCb.queueFps[SYNC_QUEUE] = putToQueue;
    msgCb.queueFps[WRITE_QUEUE] = putToQueue;

@@ -91,9 +91,7 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer
           pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer);
   }

-  char *p = NULL;
-  streamTaskGetStatus(pTask, &p);
+  char* p = streamTaskGetStatus(pTask)->name;

   if (pTask->info.fillHistory) {
     sndInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
             " nextProcessVer:%" PRId64
@@ -147,7 +145,8 @@ FAIL:
 }

 int32_t sndInit(SSnode *pSnode) {
-  tqStreamTaskResetStatus(pSnode->pMeta);
+  int32_t numOfTasks = 0;
+  tqStreamTaskResetStatus(pSnode->pMeta, &numOfTasks);
   streamMetaStartAllTasks(pSnode->pMeta);
   return 0;
 }
@@ -195,7 +194,7 @@ int32_t sndProcessWriteMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg *pRsp) {
     case TDMT_STREAM_TASK_DEPLOY: {
       void * pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
       int32_t len = pMsg->contLen - sizeof(SMsgHead);
-      return tqStreamTaskProcessDeployReq(pSnode->pMeta, -1, pReq, len, true, true);
+      return tqStreamTaskProcessDeployReq(pSnode->pMeta, &pSnode->msgCb, pMsg->info.conn.applyIndex, pReq, len, true, true);
     }

     case TDMT_STREAM_TASK_DROP:
@@ -204,6 +203,10 @@ int32_t sndProcessWriteMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg *pRsp) {
      return tqStreamTaskProcessUpdateReq(pSnode->pMeta, &pSnode->msgCb, pMsg, true);
    case TDMT_VND_STREAM_TASK_RESET:
      return tqStreamTaskProcessTaskResetReq(pSnode->pMeta, pMsg);
+   case TDMT_STREAM_TASK_PAUSE:
+     return tqStreamTaskProcessTaskPauseReq(pSnode->pMeta, pMsg->pCont);
+   case TDMT_STREAM_TASK_RESUME:
+     return tqStreamTaskProcessTaskResumeReq(pSnode->pMeta, pMsg->info.conn.applyIndex, pMsg->pCont, false);
     default:
       ASSERT(0);
   }

@@ -107,6 +107,7 @@ struct STQ {
   TTB* pExecStore;
   TTB* pCheckStore;
   SStreamMeta* pStreamMeta;
+  void* tqTimer;
 };

 int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);

@@ -19,13 +19,25 @@

 // 0: not init
 // 1: already inited
-// 2: wait to be inited or cleaup
+// 2: wait to be inited or cleanup
 static int32_t tqInitialize(STQ* pTq);

 static FORCE_INLINE bool tqIsHandleExec(STqHandle* pHandle) { return TMQ_HANDLE_STATUS_EXEC == pHandle->status; }
 static FORCE_INLINE void tqSetHandleExec(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_EXEC; }
 static FORCE_INLINE void tqSetHandleIdle(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_IDLE; }

+static int32_t tqTimerInit(STQ* pTq) {
+  pTq->tqTimer = taosTmrInit(100, 100, 1000, "TQ");
+  if (pTq->tqTimer == NULL) {
+    return -1;
+  }
+  return 0;
+}
+
+static void tqTimerCleanUp(STQ* pTq) {
+  taosTmrCleanUp(pTq->tqTimer);
+}
+
 void tqDestroyTqHandle(void* data) {
   STqHandle* pData = (STqHandle*)data;
   qDestroyTask(pData->execHandle.task);
@@ -106,6 +118,7 @@ int32_t tqInitialize(STQ* pTq) {
     return -1;
   }

+  tqTimerInit(pTq);
   return 0;
 }

@@ -136,6 +149,8 @@ void tqClose(STQ* pTq) {
   taosMemoryFree(pTq->path);
   tqMetaClose(pTq);
   streamMetaClose(pTq->pStreamMeta);
+  tqTimerCleanUp(pTq);
+
   qDebug("end to close tq");
   taosMemoryFree(pTq);
 }
@@ -842,8 +857,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
            pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer);
   }

-  char* p = NULL;
-  streamTaskGetStatus(pTask, &p);
+  char* p = streamTaskGetStatus(pTask)->name;

   if (pTask->info.fillHistory) {
     tqInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
@@ -873,7 +887,8 @@ int32_t tqProcessTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) {
 }

 int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
-  return tqStreamTaskProcessDeployReq(pTq->pStreamMeta, sversion, msg, msgLen, vnodeIsRoleLeader(pTq->pVnode), pTq->pVnode->restored);
+  return tqStreamTaskProcessDeployReq(pTq->pStreamMeta, &pTq->pVnode->msgCb, sversion, msg, msgLen,
+                                      vnodeIsRoleLeader(pTq->pVnode), pTq->pVnode->restored);
 }

 static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask, STQ* pTq) {
@@ -932,8 +947,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {

   // do recovery step1
   const char* id = pTask->id.idStr;
-  char* pStatus = NULL;
-  streamTaskGetStatus(pTask, &pStatus);
+  char* pStatus = streamTaskGetStatus(pTask)->name;

   // avoid multi-thread exec
   while (1) {
@@ -949,7 +963,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
   // let's decide which step should be executed now
   if (pTask->execInfo.step1Start == 0) {
     int64_t ts = taosGetTimestampMs();
-
     pTask->execInfo.step1Start = ts;
     tqDebug("s-task:%s start scan-history stage(step 1), status:%s, step1 startTs:%" PRId64, id, pStatus, ts);
   } else {
@@ -990,15 +1003,15 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
     if (retInfo.ret == TASK_SCANHISTORY_REXEC) {
       streamReExecScanHistoryFuture(pTask, retInfo.idleTime);
     } else {
-      char* p = NULL;
-      ETaskStatus s = streamTaskGetStatus(pTask, &p);
+      SStreamTaskState* p = streamTaskGetStatus(pTask);
+      ETaskStatus s = p->state;

       if (s == TASK_STATUS__PAUSE) {
         tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs total:%.2fs, sched-status:%d", pTask->id.idStr,
                 el, pTask->execInfo.step1El, status);
       } else if (s == TASK_STATUS__STOP || s == TASK_STATUS__DROPPING) {
-        tqDebug("s-task:%s status:%p not continue scan-history data, total elapsed time:%.2fs quit", pTask->id.idStr, p,
-                pTask->execInfo.step1El);
+        tqDebug("s-task:%s status:%p not continue scan-history data, total elapsed time:%.2fs quit", pTask->id.idStr,
+                p->name, pTask->execInfo.step1El);
       }
     }

@@ -1038,15 +1051,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
     streamMetaReleaseTask(pMeta, pStreamTask);
   } else {
     ASSERT(0);
-    // STimeWindow* pWindow = &pTask->dataRange.window;
-    // ASSERT(HAS_RELATED_FILLHISTORY_TASK(pTask) || streamTaskShouldStop(pTask));
-    //
-    // // Not update the fill-history time window until the state transfer is completed.
-    // tqDebug("s-task:%s scan-history in stream time window completed, start to handle data from WAL, startVer:%" PRId64
-    //         ", window:%" PRId64 " - %" PRId64,
-    //         id, pTask->chkInfo.nextProcessVer, pWindow->skey, pWindow->ekey);
-    //
-    // code = streamTaskScanHistoryDataComplete(pTask);
   }

   atomic_store_32(&pTask->status.inScanHistorySentinel, 0);
@@ -1066,15 +1070,16 @@ int32_t tqProcessTaskScanHistoryFinishRsp(STQ* pTq, SRpcMsg* pMsg) {
 int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
   SStreamTaskRunReq* pReq = pMsg->pCont;

-  int32_t taskId = pReq->taskId;
-
-  if (taskId == STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID) {  // all tasks are extracted submit data from the wal
+  // extracted submit data from wal files for all tasks
+  if (pReq->reqType == STREAM_EXEC_T_EXTRACT_WAL_DATA) {
     tqScanWal(pTq);
     return 0;
   }

   int32_t code = tqStreamTaskProcessRunReq(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode));
-  if (code == 0 && taskId > 0) {
+
+  // let's continue scan data in the wal files
+  if (code == 0 && pReq->reqType >= 0) {
     tqScanWalAsync(pTq, false);
   }

@@ -1094,108 +1099,11 @@ int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen) {
 }

 int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
-  SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)msg;
-
-  SStreamMeta* pMeta = pTq->pStreamMeta;
-  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
-  if (pTask == NULL) {
-    tqError("vgId:%d process pause req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId,
-            pReq->taskId);
-    // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active
-    return TSDB_CODE_SUCCESS;
-  }
-
-  tqDebug("s-task:%s receive pause msg from mnode", pTask->id.idStr);
-  streamTaskPause(pTask, pMeta);
-
-  SStreamTask* pHistoryTask = NULL;
-  if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
-    pHistoryTask = streamMetaAcquireTask(pMeta, pTask->hTaskInfo.id.streamId, pTask->hTaskInfo.id.taskId);
-    if (pHistoryTask == NULL) {
-      tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%" PRIx64
-              ", it may have been dropped already",
-              pMeta->vgId, pTask->hTaskInfo.id.taskId);
-      streamMetaReleaseTask(pMeta, pTask);
-
-      // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active
-      return TSDB_CODE_SUCCESS;
-    }
-
-    tqDebug("s-task:%s fill-history task handle paused along with related stream task", pHistoryTask->id.idStr);
-
-    streamTaskPause(pHistoryTask, pMeta);
-    streamMetaReleaseTask(pMeta, pHistoryTask);
-  }
-
-  streamMetaReleaseTask(pMeta, pTask);
-  return TSDB_CODE_SUCCESS;
-}
-
-int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion, int8_t igUntreated) {
-  int32_t vgId = pTq->pStreamMeta->vgId;
-  if (pTask == NULL) {
-    return -1;
-  }
-
-  streamTaskResume(pTask);
-  ETaskStatus status = streamTaskGetStatus(pTask, NULL);
-
-  int32_t level = pTask->info.taskLevel;
-  if (level == TASK_LEVEL__SINK) {
-    if (status == TASK_STATUS__UNINIT) {
-    }
-    streamMetaReleaseTask(pTq->pStreamMeta, pTask);
-    return 0;
-  }
-
-  if (status == TASK_STATUS__READY || status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__CK) {
-    // no lock needs to secure the access of the version
-    if (igUntreated && level == TASK_LEVEL__SOURCE && !pTask->info.fillHistory) {
-      // discard all the data when the stream task is suspended.
-      walReaderSetSkipToVersion(pTask->exec.pWalReader, sversion);
-      tqDebug("vgId:%d s-task:%s resume to exec, prev paused version:%" PRId64 ", start from vnode ver:%" PRId64
-              ", schedStatus:%d",
-              vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus);
-    } else {  // from the previous paused version and go on
-      tqDebug("vgId:%d s-task:%s resume to exec, from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d",
-              vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus);
-    }
-
-    if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory && status == TASK_STATUS__SCAN_HISTORY) {
-      streamStartScanHistoryAsync(pTask, igUntreated);
-    } else if (level == TASK_LEVEL__SOURCE && (streamQueueGetNumOfItems(pTask->inputq.queue) == 0)) {
-      tqScanWalAsync(pTq, false);
-    } else {
-      streamSchedExec(pTask);
-    }
-  } else if (status == TASK_STATUS__UNINIT) {
-    // todo: fill-history task init ?
-    if (pTask->info.fillHistory == 0) {
-      EStreamTaskEvent event = /*HAS_RELATED_FILLHISTORY_TASK(pTask) ? TASK_EVENT_INIT_STREAM_SCANHIST : */ TASK_EVENT_INIT;
-      streamTaskHandleEvent(pTask->status.pSM, event);
-    }
-  }
-
-  streamMetaReleaseTask(pTq->pStreamMeta, pTask);
-  return 0;
+  return tqStreamTaskProcessTaskPauseReq(pTq->pStreamMeta, msg);
 }

 int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
-  SVResumeStreamTaskReq* pReq = (SVResumeStreamTaskReq*)msg;
-
-  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->streamId, pReq->taskId);
-  int32_t code = tqProcessTaskResumeImpl(pTq, pTask, sversion, pReq->igUntreated);
-  if (code != 0) {
-    return code;
-  }
-
-  STaskId* pHTaskId = &pTask->hTaskInfo.id;
-  SStreamTask* pHistoryTask = streamMetaAcquireTask(pTq->pStreamMeta, pHTaskId->streamId, pHTaskId->taskId);
-  if (pHistoryTask) {
-    code = tqProcessTaskResumeImpl(pTq, pHistoryTask, sversion, pReq->igUntreated);
-  }
-
-  return code;
+  return tqStreamTaskProcessTaskResumeReq(pTq, sversion, msg, true);
 }

 int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
@@ -1275,7 +1183,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)

   // todo save the checkpoint failed info
   taosThreadMutexLock(&pTask->lock);
-  ETaskStatus status = streamTaskGetStatus(pTask, NULL);
+  ETaskStatus status = streamTaskGetStatus(pTask)->state;

   if (status == TASK_STATUS__HALT || status == TASK_STATUS__PAUSE) {
     tqError("s-task:%s not ready for checkpoint, since it is halt, ignore this checkpoint:%" PRId64 ", set it failure",
@@ -1362,14 +1270,8 @@ int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg) {
   }

   taosThreadMutexLock(&pTask->lock);
-  ETaskStatus status = streamTaskGetStatus(pTask, NULL);
-  // if (status == TASK_STATUS__STREAM_SCAN_HISTORY) {
-  //   streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_SCANHIST_DONE);
-  // }
-
   SStreamTaskId id = {.streamId = pTask->hTaskInfo.id.streamId, .taskId = pTask->hTaskInfo.id.taskId};
   streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &id);
-
   taosThreadMutexUnlock(&pTask->lock);

   // clear the scheduler status

@ -24,6 +24,7 @@ static int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId);
|
||||||
static bool handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver);
|
static bool handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver);
|
||||||
static bool taskReadyForDataFromWal(SStreamTask* pTask);
|
static bool taskReadyForDataFromWal(SStreamTask* pTask);
|
||||||
static bool doPutDataIntoInputQFromWal(SStreamTask* pTask, int64_t maxVer, int32_t* numOfItems);
|
static bool doPutDataIntoInputQFromWal(SStreamTask* pTask, int64_t maxVer, int32_t* numOfItems);
|
||||||
|
static int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration);
|
||||||
|
|
||||||
// extract data blocks(submit/delete) from WAL, and add them into the input queue for all the sources tasks.
|
// extract data blocks(submit/delete) from WAL, and add them into the input queue for all the sources tasks.
|
||||||
int32_t tqScanWal(STQ* pTq) {
|
int32_t tqScanWal(STQ* pTq) {
|
||||||
|
@ -31,35 +32,78 @@ int32_t tqScanWal(STQ* pTq) {
|
||||||
SStreamMeta* pMeta = pTq->pStreamMeta;
|
SStreamMeta* pMeta = pTq->pStreamMeta;
|
||||||
int64_t st = taosGetTimestampMs();
|
int64_t st = taosGetTimestampMs();
|
||||||
|
|
||||||
while (1) {
|
tqDebug("vgId:%d continue to check if data in wal are available, scanCounter:%d", vgId, pMeta->scanInfo.scanCounter);
|
||||||
int32_t scan = pMeta->walScanCounter;
|
|
||||||
tqDebug("vgId:%d continue check if data in wal are available, walScanCounter:%d", vgId, scan);
|
|
||||||
|
|
||||||
// check all tasks
|
// check all tasks
|
||||||
|
int32_t numOfTasks = 0;
|
||||||
bool shouldIdle = true;
|
bool shouldIdle = true;
|
||||||
doScanWalForAllTasks(pTq->pStreamMeta, &shouldIdle);
|
doScanWalForAllTasks(pMeta, &shouldIdle);
|
||||||
|
|
||||||
// if (shouldIdle) {
|
|
||||||
streamMetaWLock(pMeta);
|
streamMetaWLock(pMeta);
|
||||||
int32_t times = (--pMeta->walScanCounter);
|
int32_t times = (--pMeta->scanInfo.scanCounter);
|
||||||
ASSERT(pMeta->walScanCounter >= 0);
|
ASSERT(pMeta->scanInfo.scanCounter >= 0);
|
||||||
|
|
||||||
|
numOfTasks = taosArrayGetSize(pMeta->pTaskList);
|
||||||
streamMetaWUnLock(pMeta);
|
streamMetaWUnLock(pMeta);
|
||||||
|
|
||||||
if (times <= 0) {
|
|
||||||
break;
|
|
||||||
} else {
|
|
||||||
tqDebug("vgId:%d scan wal for stream tasks for %d times in %dms", vgId, times, SCAN_WAL_IDLE_DURATION);
|
|
||||||
}
|
|
||||||
// }
|
|
||||||
|
|
||||||
taosMsleep(SCAN_WAL_IDLE_DURATION);
|
|
||||||
}
|
|
||||||
|
|
||||||
int64_t el = (taosGetTimestampMs() - st);
|
int64_t el = (taosGetTimestampMs() - st);
|
||||||
tqDebug("vgId:%d scan wal for stream tasks completed, elapsed time:%" PRId64 " ms", vgId, el);
|
tqDebug("vgId:%d scan wal for stream tasks completed, elapsed time:%" PRId64 " ms", vgId, el);
|
||||||
|
|
||||||
|
if (times > 0) {
|
||||||
|
tqDebug("vgId:%d scan wal for stream tasks for %d times in %dms", vgId, times, SCAN_WAL_IDLE_DURATION);
|
||||||
|
tqScanWalInFuture(pTq, numOfTasks, SCAN_WAL_IDLE_DURATION);
|
||||||
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
typedef struct SBuildScanWalMsgParam {
|
||||||
|
STQ* pTq;
|
||||||
|
int32_t numOfTasks;
|
||||||
|
} SBuildScanWalMsgParam;
|
||||||
|
|
||||||
|
static void doStartScanWal(void* param, void* tmrId) {
|
||||||
|
SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*) param;
|
||||||
|
|
||||||
|
int32_t vgId = pParam->pTq->pStreamMeta->vgId;
|
||||||
|
|
||||||
|
SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
|
||||||
|
if (pRunReq == NULL) {
|
||||||
|
taosMemoryFree(pParam);
|
||||||
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
tqError("vgId:%d failed to create msg to start wal scanning to launch stream tasks, code:%s", vgId, terrstr());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
tqDebug("vgId:%d create msg to start wal scan, numOfTasks:%d, vnd restored:%d", vgId, pParam->numOfTasks,
|
||||||
|
pParam->pTq->pVnode->restored);
|
||||||
|
|
||||||
|
pRunReq->head.vgId = vgId;
|
||||||
|
pRunReq->streamId = 0;
|
||||||
|
pRunReq->taskId = 0;
|
||||||
|
pRunReq->reqType = STREAM_EXEC_T_EXTRACT_WAL_DATA;
|
||||||
|
|
||||||
|
SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
|
||||||
|
tmsgPutToQueue(&pParam->pTq->pVnode->msgCb, STREAM_QUEUE, &msg);
|
||||||
|
|
||||||
|
taosMemoryFree(pParam);
|
||||||
|
}
|
||||||
|
|
||||||
|
int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) {
|
||||||
|
SStreamMeta* pMeta = pTq->pStreamMeta;
|
||||||
|
|
||||||
|
SBuildScanWalMsgParam* pParam = taosMemoryMalloc(sizeof(SBuildScanWalMsgParam));
|
||||||
|
|
||||||
|
pParam->pTq = pTq;
|
||||||
|
pParam->numOfTasks = numOfTasks;
|
||||||
|
if (pMeta->scanInfo.scanTimer == NULL) {
|
||||||
|
pMeta->scanInfo.scanTimer = taosTmrStart(doStartScanWal, idleDuration, pParam, pTq->tqTimer);
|
||||||
|
} else {
|
||||||
|
taosTmrReset(doStartScanWal, idleDuration, pParam, pTq->tqTimer, &pMeta->scanInfo.scanTimer);
|
||||||
|
}
|
||||||
|
|
||||||
|
return TSDB_CODE_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
int32_t tqScanWalAsync(STQ* pTq, bool ckPause) {
|
int32_t tqScanWalAsync(STQ* pTq, bool ckPause) {
|
||||||
int32_t vgId = TD_VID(pTq->pVnode);
|
int32_t vgId = TD_VID(pTq->pVnode);
|
||||||
SStreamMeta* pMeta = pTq->pStreamMeta;
|
SStreamMeta* pMeta = pTq->pStreamMeta;
|
||||||
|
@ -80,23 +124,23 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
pMeta->walScanCounter += 1;
|
pMeta->scanInfo.scanCounter += 1;
|
||||||
if (pMeta->walScanCounter > MAX_REPEAT_SCAN_THRESHOLD) {
|
if (pMeta->scanInfo.scanCounter > MAX_REPEAT_SCAN_THRESHOLD) {
|
||||||
pMeta->walScanCounter = MAX_REPEAT_SCAN_THRESHOLD;
|
pMeta->scanInfo.scanCounter = MAX_REPEAT_SCAN_THRESHOLD;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pMeta->walScanCounter > 1) {
|
if (pMeta->scanInfo.scanCounter > 1) {
|
||||||
tqDebug("vgId:%d wal read task has been launched, remain scan times:%d", vgId, pMeta->walScanCounter);
|
tqDebug("vgId:%d wal read task has been launched, remain scan times:%d", vgId, pMeta->scanInfo.scanCounter);
|
||||||
streamMetaWUnLock(pMeta);
|
streamMetaWUnLock(pMeta);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t numOfPauseTasks = pTq->pStreamMeta->numOfPausedTasks;
|
int32_t numOfPauseTasks = pMeta->numOfPausedTasks;
|
||||||
if (ckPause && numOfTasks == numOfPauseTasks) {
|
if (ckPause && numOfTasks == numOfPauseTasks) {
|
||||||
tqDebug("vgId:%d ignore all submit, all streams had been paused, reset the walScanCounter", vgId);
|
tqDebug("vgId:%d ignore all submit, all streams had been paused, reset the walScanCounter", vgId);
|
||||||
|
|
||||||
// reset the counter value, since we do not launch the scan wal operation.
|
// reset the counter value, since we do not launch the scan wal operation.
|
||||||
pMeta->walScanCounter = 0;
|
pMeta->scanInfo.scanCounter = 0;
|
||||||
streamMetaWUnLock(pMeta);
|
streamMetaWUnLock(pMeta);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@@ -114,7 +158,8 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) {
   pRunReq->head.vgId = vgId;
   pRunReq->streamId = 0;
-  pRunReq->taskId = STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID;
+  pRunReq->taskId = 0;
+  pRunReq->reqType = STREAM_EXEC_T_EXTRACT_WAL_DATA;
 
   SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
   tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &msg);
@@ -138,7 +183,8 @@ int32_t tqStopStreamTasksAsync(STQ* pTq) {
   pRunReq->head.vgId = vgId;
   pRunReq->streamId = 0;
-  pRunReq->taskId = STREAM_EXEC_STOP_ALL_TASKS_ID;
+  pRunReq->taskId = 0;
+  pRunReq->reqType = STREAM_EXEC_T_STOP_ALL_TASKS;
 
   SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
   tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &msg);
@@ -220,16 +266,15 @@ bool taskReadyForDataFromWal(SStreamTask* pTask) {
   }
 
   // not in ready state, do not handle the data from wal
-  char* p = NULL;
-  int32_t status = streamTaskGetStatus(pTask, &p);
-  if (streamTaskGetStatus(pTask, &p) != TASK_STATUS__READY) {
-    tqTrace("s-task:%s not ready for submit block in wal, status:%s", pTask->id.idStr, p);
+  SStreamTaskState* pState = streamTaskGetStatus(pTask);
+  if (pState->state != TASK_STATUS__READY) {
+    tqTrace("s-task:%s not ready for submit block in wal, status:%s", pTask->id.idStr, pState->name);
     return false;
   }
 
   // fill-history task has entered into the last phase, no need to anything
   if ((pTask->info.fillHistory == 1) && pTask->status.appendTranstateBlock) {
-    ASSERT(status == TASK_STATUS__READY);
+    ASSERT(pState->state == TASK_STATUS__READY);
     // the maximum version of data in the WAL has reached already, the step2 is done
     tqDebug("s-task:%s fill-history reach the maximum ver:%" PRId64 ", not scan wal anymore", pTask->id.idStr,
             pTask->dataRange.range.maxVer);
@@ -342,10 +387,9 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
     taosThreadMutexLock(&pTask->lock);
 
-    char* p = NULL;
-    ETaskStatus status = streamTaskGetStatus(pTask, &p);
-    if (status != TASK_STATUS__READY) {
-      tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, p);
+    SStreamTaskState* pState = streamTaskGetStatus(pTask);
+    if (pState->state != TASK_STATUS__READY) {
+      tqDebug("s-task:%s not ready for submit block from wal, status:%s", pTask->id.idStr, pState->name);
       taosThreadMutexUnlock(&pTask->lock);
       streamMetaReleaseTask(pStreamMeta, pTask);
       continue;
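The two hunks above change the status query from a two-step call (numeric status plus a name out-parameter) to a single accessor returning one state object. A minimal self-contained sketch of that pattern follows; the struct, enum, and function here are local stand-ins for illustration, not the actual TDengine definitions.

```c
#include <stdio.h>

typedef enum { TASK_STATUS__UNINIT = 0, TASK_STATUS__READY = 1 } ETaskStatus;

/* The accessor returns one object bundling the numeric state and its printable
 * name, so callers can no longer pair a stale name with a fresh state value. */
typedef struct {
  ETaskStatus state;
  const char* name;
} SStreamTaskState;

typedef struct {
  SStreamTaskState status;
} SStreamTask;

static SStreamTaskState* streamTaskGetStatus(SStreamTask* pTask) { return &pTask->status; }

int main(void) {
  SStreamTask task = {.status = {.state = TASK_STATUS__READY, .name = "ready"}};

  SStreamTaskState* pState = streamTaskGetStatus(&task);
  if (pState->state != TASK_STATUS__READY) {
    printf("not ready, status:%s\n", pState->name);
    return 1;
  }
  printf("ready, status:%s\n", pState->name);
  return 0;
}
```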
@@ -25,7 +25,6 @@ typedef struct STaskUpdateEntry {
 
 int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart) {
   int32_t vgId = pMeta->vgId;
 
   int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
   if (numOfTasks == 0) {
     tqDebug("vgId:%d no stream tasks existed to run", vgId);
@@ -42,7 +41,62 @@ int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart) {
   tqDebug("vgId:%d start all %d stream task(s) async", vgId, numOfTasks);
   pRunReq->head.vgId = vgId;
   pRunReq->streamId = 0;
-  pRunReq->taskId = restart ? STREAM_EXEC_RESTART_ALL_TASKS_ID : STREAM_EXEC_START_ALL_TASKS_ID;
+  pRunReq->taskId = 0;
+  pRunReq->reqType = restart ? STREAM_EXEC_T_RESTART_ALL_TASKS : STREAM_EXEC_T_START_ALL_TASKS;
+
+  SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
+  tmsgPutToQueue(cb, STREAM_QUEUE, &msg);
+  return 0;
+}
+
+int32_t tqStreamOneTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t streamId, int32_t taskId) {
+  int32_t vgId = pMeta->vgId;
+
+  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
+  if (numOfTasks == 0) {
+    tqDebug("vgId:%d no stream tasks existed to run", vgId);
+    return 0;
+  }
+
+  SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
+  if (pRunReq == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    tqError("vgId:%d failed to create msg to start task:0x%x, code:%s", vgId, taskId, terrstr());
+    return -1;
+  }
+
+  tqDebug("vgId:%d start task:0x%x async", vgId, taskId);
+  pRunReq->head.vgId = vgId;
+  pRunReq->streamId = streamId;
+  pRunReq->taskId = taskId;
+  pRunReq->reqType = STREAM_EXEC_T_START_ONE_TASK;
+
+  SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
+  tmsgPutToQueue(cb, STREAM_QUEUE, &msg);
+  return 0;
+}
+
+int32_t tqUpdateNodeEpsetAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t streamId, int32_t taskId) {
+  int32_t vgId = pMeta->vgId;
+
+  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
+  if (numOfTasks == 0) {
+    tqDebug("vgId:%d no stream tasks existed to run", vgId);
+    return 0;
+  }
+
+  SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
+  if (pRunReq == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    tqError("vgId:%d failed to create msg to start task:0x%x, code:%s", vgId, taskId, terrstr());
+    return -1;
+  }
+
+  tqDebug("vgId:%d update s-task:0x%x nodeEpset async", vgId, taskId);
+  pRunReq->head.vgId = vgId;
+  pRunReq->streamId = streamId;
+  pRunReq->taskId = taskId;
+  pRunReq->reqType = STREAM_EXEC_T_UPDATE_TASK_EPSET;
 
   SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
   tmsgPutToQueue(cb, STREAM_QUEUE, &msg);
@@ -54,6 +108,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
   char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
   int32_t len = pMsg->contLen - sizeof(SMsgHead);
   SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS};
+  int64_t st = taosGetTimestampMs();
 
   SStreamTaskNodeUpdateMsg req = {0};
 
@@ -75,7 +130,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
   STaskId id = {.streamId = req.streamId, .taskId = req.taskId};
   SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
   if (ppTask == NULL || *ppTask == NULL) {
-    tqError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped already", pMeta->vgId,
+    tqError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped already", vgId,
            req.taskId);
     rsp.code = TSDB_CODE_SUCCESS;
     streamMetaWUnLock(pMeta);
@@ -85,43 +140,38 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
   }
 
   SStreamTask* pTask = *ppTask;
+  const char* idstr = pTask->id.idStr;
 
   if (pMeta->updateInfo.transId != req.transId) {
     pMeta->updateInfo.transId = req.transId;
-    tqInfo("s-task:%s receive new trans to update nodeEp msg from mnode, transId:%d", pTask->id.idStr, req.transId);
+    tqInfo("s-task:%s receive new trans to update nodeEp msg from mnode, transId:%d", idstr, req.transId);
     // info needs to be kept till the new trans to update the nodeEp arrived.
     taosHashClear(pMeta->updateInfo.pTasks);
   } else {
-    tqDebug("s-task:%s recv trans to update nodeEp from mnode, transId:%d", pTask->id.idStr, req.transId);
+    tqDebug("s-task:%s recv trans to update nodeEp from mnode, transId:%d", idstr, req.transId);
   }
 
   STaskUpdateEntry entry = {.streamId = req.streamId, .taskId = req.taskId, .transId = req.transId};
-  void* exist = taosHashGet(pMeta->updateInfo.pTasks, &entry, sizeof(STaskUpdateEntry));
-  if (exist != NULL) {
-    tqDebug("s-task:%s (vgId:%d) already update in trans:%d, discard the nodeEp update msg", pTask->id.idStr, vgId,
-            req.transId);
+
+  void* pReqTask = taosHashGet(pMeta->updateInfo.pTasks, &entry, sizeof(STaskUpdateEntry));
+  if (pReqTask != NULL) {
+    tqDebug("s-task:%s (vgId:%d) already update in trans:%d, discard the nodeEp update msg", idstr, vgId, req.transId);
     rsp.code = TSDB_CODE_SUCCESS;
     streamMetaWUnLock(pMeta);
     taosArrayDestroy(req.pNodeList);
     return rsp.code;
   }
 
-  // streamMetaWUnLock(pMeta);
-
-  // todo for test purpose
   // the following two functions should not be executed within the scope of meta lock to avoid deadlock
   streamTaskUpdateEpsetInfo(pTask, req.pNodeList);
   streamTaskResetStatus(pTask);
 
-  // continue after lock the meta again
-  // streamMetaWLock(pMeta);
-
   SStreamTask** ppHTask = NULL;
   if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
     ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
     if (ppHTask == NULL || *ppHTask == NULL) {
       tqError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already",
-              pMeta->vgId, req.taskId);
+              vgId, req.taskId);
       CLEAR_RELATED_FILLHISTORY_TASK(pTask);
     } else {
       tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr);
@@ -129,17 +179,23 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
     }
   }
 
-  {
+  if (restored) {
+    tqDebug("s-task:%s vgId:%d start to save task", idstr, vgId);
     streamMetaSaveTask(pMeta, pTask);
     if (ppHTask != NULL) {
       streamMetaSaveTask(pMeta, *ppHTask);
     }
 
+#if 0
     if (streamMetaCommit(pMeta) < 0) {
       // persist to disk
     }
+#endif
+  } else {
+    tqDebug("s-task:%s vgId:%d not save since restore not finish", idstr, vgId);
   }
 
+  tqDebug("s-task:%s vgId:%d start to stop task after save task", idstr, vgId);
   streamTaskStop(pTask);
 
   // keep the already handled info
@@ -147,10 +203,15 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
 
   if (ppHTask != NULL) {
     streamTaskStop(*ppHTask);
-    tqDebug("s-task:%s task nodeEp update completed, streamTask and related fill-history task closed", pTask->id.idStr);
+
+    int64_t now = taosGetTimestampMs();
+    tqDebug("s-task:%s vgId:%d task nodeEp update completed, streamTask/fill-history closed, elapsed:%" PRId64 " ms",
+            idstr, vgId, now - st);
     taosHashPut(pMeta->updateInfo.pTasks, &(*ppHTask)->id, sizeof(pTask->id), NULL, 0);
   } else {
-    tqDebug("s-task:%s task nodeEp update completed, streamTask closed", pTask->id.idStr);
+    int64_t now = taosGetTimestampMs();
+    tqDebug("s-task:%s vgId:%d, task nodeEp update completed, streamTask closed, elapsed time:%" PRId64 "ms", idstr,
+            vgId, now - st);
   }
 
   rsp.code = 0;
@@ -171,11 +232,12 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
     pMeta->startInfo.tasksWillRestart = 0;
     streamMetaWUnLock(pMeta);
   } else {
-    tqDebug("vgId:%d all %d task(s) nodeEp updated and closed", vgId, numOfTasks);
+    tqDebug("vgId:%d all %d task(s) nodeEp updated and closed, transId:%d", vgId, numOfTasks, req.transId);
 #if 0
     // for test purpose, to trigger the leader election
     taosMSleep(5000);
 #endif
 
     tqStreamTaskStartAsync(pMeta, cb, true);
     streamMetaWUnLock(pMeta);
   }
@@ -399,11 +461,11 @@ int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
     rsp.status = streamTaskCheckStatus(pTask, req.upstreamTaskId, req.upstreamNodeId, req.stage, &rsp.oldStage);
     streamMetaReleaseTask(pMeta, pTask);
 
-    char* p = NULL;
-    streamTaskGetStatus(pTask, &p);
+    SStreamTaskState* pState = streamTaskGetStatus(pTask);
     tqDebug("s-task:%s status:%s, stage:%" PRId64 " recv task check req(reqId:0x%" PRIx64
             ") task:0x%x (vgId:%d), check_status:%d",
-            pTask->id.idStr, p, rsp.oldStage, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
+            pTask->id.idStr, pState->name, rsp.oldStage, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId,
+            rsp.status);
   } else {
     rsp.status = TASK_DOWNSTREAM_NOT_READY;
     tqDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(reqId:0x%" PRIx64
@@ -415,12 +477,22 @@ int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
   return streamSendCheckRsp(pMeta, &req, &rsp, &pMsg->info, taskId);
 }
 
+static void setParam(SStreamTask* pTask, int64_t* initTs, bool* hasHTask, STaskId* pId) {
+  *initTs = pTask->execInfo.init;
+
+  if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
+    *hasHTask = true;
+    pId->streamId = pTask->hTaskInfo.id.streamId;
+    pId->taskId = pTask->hTaskInfo.id.taskId;
+  }
+}
+
 int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader) {
   char* pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
   int32_t len = pMsg->contLen - sizeof(SMsgHead);
   int32_t vgId = pMeta->vgId;
+  int32_t code = TSDB_CODE_SUCCESS;
 
-  int32_t code;
   SStreamTaskCheckRsp rsp;
 
   SDecoder decoder;
@@ -437,20 +509,62 @@ int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLe
   tqDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d",
           rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status);
 
+  int64_t initTs = 0;
+  int64_t now = taosGetTimestampMs();
+  STaskId id = {.streamId = rsp.streamId, .taskId = rsp.upstreamTaskId};
+  STaskId fId = {0};
+  bool hasHistoryTask = false;
+
+  // todo extract method
   if (!isLeader) {
-    streamMetaUpdateTaskDownstreamStatus(pMeta, rsp.streamId, rsp.upstreamTaskId, 0, taosGetTimestampMs(), false);
+    // this task may have been stopped, so acquire task may failed. Retrieve it directly from the task hash map.
+    streamMetaRLock(pMeta);
+
+    SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
+    if (ppTask != NULL) {
+      setParam(*ppTask, &initTs, &hasHistoryTask, &fId);
+      streamMetaRUnLock(pMeta);
+
+      if (hasHistoryTask) {
+        streamMetaAddTaskLaunchResult(pMeta, fId.streamId, fId.taskId, initTs, now, false);
+      }
+
       tqError("vgId:%d not leader, task:0x%x not handle the check rsp, downstream:0x%x (vgId:%d)", vgId,
               rsp.upstreamTaskId, rsp.downstreamTaskId, rsp.downstreamNodeId);
+    } else {
+      streamMetaRUnLock(pMeta);
+
+      tqError("tq failed to locate the stream task:0x%" PRIx64 "-0x%x (vgId:%d), it may have been destroyed or stopped",
+              rsp.streamId, rsp.upstreamTaskId, vgId);
+      code = terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST;
+    }
+
+    streamMetaAddTaskLaunchResult(pMeta, rsp.streamId, rsp.upstreamTaskId, initTs, now, false);
     return code;
   }
 
   SStreamTask* pTask = streamMetaAcquireTask(pMeta, rsp.streamId, rsp.upstreamTaskId);
   if (pTask == NULL) {
-    streamMetaUpdateTaskDownstreamStatus(pMeta, rsp.streamId, rsp.upstreamTaskId, 0, taosGetTimestampMs(), false);
+    streamMetaRLock(pMeta);
+
+    SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
+    if (ppTask != NULL) {
+      setParam(*ppTask, &initTs, &hasHistoryTask, &fId);
+      streamMetaRUnLock(pMeta);
+
+      if (hasHistoryTask) {
+        streamMetaAddTaskLaunchResult(pMeta, fId.streamId, fId.taskId, initTs, now, false);
+      }
+    } else {
+      streamMetaRUnLock(pMeta);
+    }
+
+    streamMetaAddTaskLaunchResult(pMeta, rsp.streamId, rsp.upstreamTaskId, initTs, now, false);
     tqError("tq failed to locate the stream task:0x%" PRIx64 "-0x%x (vgId:%d), it may have been destroyed or stopped",
             rsp.streamId, rsp.upstreamTaskId, vgId);
-    terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST;
-    return -1;
+    code = terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST;
+    return code;
   }
 
   code = streamProcessCheckRsp(pTask, &rsp);
@@ -489,8 +603,8 @@ int32_t tqStreamTaskProcessCheckpointReadyMsg(SStreamMeta* pMeta, SRpcMsg* pMsg)
   return code;
 }
 
-int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* msg, int32_t msgLen, bool isLeader,
-                                     bool restored) {
+int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sversion, char* msg, int32_t msgLen,
+                                     bool isLeader, bool restored) {
   int32_t code = 0;
   int32_t vgId = pMeta->vgId;
 
@@ -542,18 +656,19 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sversion, char* msg, int32_t msgLen,
   // only handled in the leader node
   if (isLeader) {
     tqDebug("vgId:%d s-task:0x%x is deployed and add into meta, numOfTasks:%d", vgId, taskId, numOfTasks);
 
-    SStreamTask* p = streamMetaAcquireTask(pMeta, streamId, taskId);
-    if (p != NULL && restored && p->info.fillHistory == 0) {
-      EStreamTaskEvent event = TASK_EVENT_INIT;
-      streamTaskHandleEvent(p->status.pSM, event);
-    } else if (!restored) {
-      tqWarn("s-task:%s not launched since vnode(vgId:%d) not ready", p->id.idStr, vgId);
-    }
+    if (restored) {
+      SStreamTask* p = streamMetaAcquireTask(pMeta, streamId, taskId);
+      if (p != NULL && (p->info.fillHistory == 0)) {
+        tqStreamOneTaskStartAsync(pMeta, cb, streamId, taskId);
+      }
 
-    if (p != NULL) {
-      streamMetaReleaseTask(pMeta, p);
-    }
+      if (p != NULL) {
+        streamMetaReleaseTask(pMeta, p);
+      }
+    } else {
+      tqWarn("s-task:0x%x not launched since vnode(vgId:%d) not ready", taskId, vgId);
+    }
   } else {
     tqDebug("vgId:%d not leader, not launch stream task s-task:0x%x", vgId, taskId);
   }
@@ -598,21 +713,25 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen
   return 0;
 }
 
-int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta) {
+int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta, int32_t* numOfTasks) {
   int32_t vgId = pMeta->vgId;
-  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
+  *numOfTasks = taosArrayGetSize(pMeta->pTaskList);
 
-  tqDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, numOfTasks);
-  if (numOfTasks == 0) {
+  tqDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, *numOfTasks);
+  if (*numOfTasks == 0) {
     return TSDB_CODE_SUCCESS;
   }
 
-  for (int32_t i = 0; i < numOfTasks; ++i) {
+  for (int32_t i = 0; i < (*numOfTasks); ++i) {
     SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
 
     STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId};
     SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
     streamTaskResetStatus(*pTask);
+
+    // if ((*pTask)->info.fillHistory == 1) {
+    //   streamResetParamForScanHistory(*pTask);
+    // }
   }
 
   return 0;
@@ -658,9 +777,18 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
     return code;
   }
 
+  {
+    STaskStartInfo* pStartInfo = &pMeta->startInfo;
+    taosHashClear(pStartInfo->pReadyTaskSet);
+    taosHashClear(pStartInfo->pFailedTaskSet);
+    pStartInfo->readyTs = 0;
+  }
+
   if (isLeader && !tsDisableStream) {
-    tqStreamTaskResetStatus(pMeta);
+    int32_t numOfTasks = 0;
+    tqStreamTaskResetStatus(pMeta, &numOfTasks);
     streamMetaWUnLock(pMeta);
 
     streamMetaStartAllTasks(pMeta);
   } else {
     streamMetaResetStartInfo(&pMeta->startInfo);
@@ -675,21 +803,44 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
 int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader) {
   SStreamTaskRunReq* pReq = pMsg->pCont;
 
-  int32_t taskId = pReq->taskId;
+  int32_t type = pReq->reqType;
   int32_t vgId = pMeta->vgId;
 
-  if (taskId == STREAM_EXEC_START_ALL_TASKS_ID) {
+  if (type == STREAM_EXEC_T_START_ONE_TASK) {
+    streamMetaStartOneTask(pMeta, pReq->streamId, pReq->taskId);
+    return 0;
+  } else if (type == STREAM_EXEC_T_START_ALL_TASKS) {
     streamMetaStartAllTasks(pMeta);
     return 0;
-  } else if (taskId == STREAM_EXEC_RESTART_ALL_TASKS_ID) {
+  } else if (type == STREAM_EXEC_T_RESTART_ALL_TASKS) {
     restartStreamTasks(pMeta, isLeader);
     return 0;
-  } else if (taskId == STREAM_EXEC_STOP_ALL_TASKS_ID) {
+  } else if (type == STREAM_EXEC_T_STOP_ALL_TASKS) {
     streamMetaStopAllTasks(pMeta);
     return 0;
+  } else if (type == STREAM_EXEC_T_RESUME_TASK) {  // task resume to run after idle for a while
+    SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
+
+    if (pTask != NULL) {
+      char* pStatus = NULL;
+      if (streamTaskReadyToRun(pTask, &pStatus)) {
+        int64_t execTs = pTask->status.lastExecTs;
+        int32_t idle = taosGetTimestampMs() - execTs;
+        tqDebug("s-task:%s task resume to run after idle for:%dms from:%" PRId64, pTask->id.idStr, idle, execTs);
+
+        streamResumeTask(pTask);
+      } else {
+        int8_t status = streamTaskSetSchedStatusInactive(pTask);
+        tqDebug("vgId:%d s-task:%s ignore run req since not in ready state, status:%s, sched-status:%d", vgId,
+                pTask->id.idStr, pStatus, status);
+      }
+      streamMetaReleaseTask(pMeta, pTask);
+    }
+
+    return 0;
   }
 
-  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, taskId);
+  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
   if (pTask != NULL) {  // even in halt status, the data in inputQ must be processed
     char* p = NULL;
     if (streamTaskReadyToRun(pTask, &p)) {
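The hunk above is the receiving half of the sentinel-ID removal: control requests are now routed by an explicit `reqType` field instead of overloading `taskId` with magic values such as `STREAM_EXEC_START_ALL_TASKS_ID`. A self-contained sketch of that dispatch pattern follows; the enum values and struct are locally declared stand-ins that mirror the diff, not the TDengine headers.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the request types seen in the diff; values are illustrative. */
typedef enum {
  STREAM_EXEC_T_START_ONE_TASK = 1,
  STREAM_EXEC_T_START_ALL_TASKS,
  STREAM_EXEC_T_RESTART_ALL_TASKS,
  STREAM_EXEC_T_STOP_ALL_TASKS,
  STREAM_EXEC_T_RESUME_TASK,
} EStreamExecType;

typedef struct {
  int32_t reqType;  /* the action, no longer smuggled through taskId */
  int64_t streamId; /* only meaningful for per-task requests */
  int32_t taskId;   /* now always a real task id, never a sentinel */
} SRunReq;

static int32_t processRunReq(const SRunReq* pReq) {
  switch (pReq->reqType) {
    case STREAM_EXEC_T_START_ONE_TASK:
      printf("start task 0x%x of stream 0x%" PRIx64 "\n", pReq->taskId, pReq->streamId);
      return 0;
    case STREAM_EXEC_T_START_ALL_TASKS:
      printf("start all tasks\n");
      return 0;
    case STREAM_EXEC_T_RESTART_ALL_TASKS:
      printf("restart all tasks\n");
      return 0;
    case STREAM_EXEC_T_STOP_ALL_TASKS:
      printf("stop all tasks\n");
      return 0;
    default: /* a plain run request for a single task */
      printf("run task 0x%x\n", pReq->taskId);
      return 0;
  }
}

int main(void) {
  SRunReq req = {.reqType = STREAM_EXEC_T_START_ONE_TASK, .streamId = 0x1, .taskId = 0x2};
  return processRunReq(&req);
}
```

The payoff is that `taskId` keeps a single meaning, so per-task handlers never have to check whether the id is actually a command in disguise.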
@@ -706,29 +857,34 @@ int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLead
     return 0;
   } else {  // NOTE: pTask->status.schedStatus is not updated since it is not be handled by the run exec.
     // todo add one function to handle this
-    tqError("vgId:%d failed to found s-task, taskId:0x%x may have been dropped", vgId, taskId);
+    tqError("vgId:%d failed to found s-task, taskId:0x%x may have been dropped", vgId, pReq->taskId);
     return -1;
   }
 }
 
 int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta) {
   STaskStartInfo* pStartInfo = &pMeta->startInfo;
-  streamMetaWLock(pMeta);
+  int32_t vgId = pMeta->vgId;
 
-  if (pStartInfo->restartCount > 0) {
-    pStartInfo->restartCount -= 1;
-    ASSERT(pStartInfo->taskStarting == 0);
-    tqDebug("vgId:%d role:%d need to restart all tasks again, restartCounter:%d", pMeta->vgId, pMeta->role,
-            pStartInfo->restartCount);
+  streamMetaWLock(pMeta);
+  if (pStartInfo->taskStarting == 1) {
+    tqDebug("vgId:%d already in start tasks procedure in other thread, restartCounter:%d, do nothing", vgId,
+            pMeta->startInfo.restartCount);
+  } else {  // not in starting procedure
+    if (pStartInfo->restartCount > 0) {
+      pStartInfo->restartCount -= 1;
+      tqDebug("vgId:%d role:%d need to restart all tasks again, restartCounter:%d", vgId, pMeta->role,
+              pStartInfo->restartCount);
 
-    streamMetaWUnLock(pMeta);
-    restartStreamTasks(pMeta, (pMeta->role == NODE_ROLE_LEADER));
-  } else {
-    streamMetaWUnLock(pMeta);
-    tqDebug("vgId:%d start all tasks completed", pMeta->vgId);
+      streamMetaWUnLock(pMeta);
+      restartStreamTasks(pMeta, (pMeta->role == NODE_ROLE_LEADER));
+      return TSDB_CODE_SUCCESS;
+    } else {
+      tqDebug("vgId:%d start all tasks completed in callbackFn", pMeta->vgId);
+    }
   }
 
+  streamMetaWUnLock(pMeta);
   return TSDB_CODE_SUCCESS;
 }
@@ -745,7 +901,7 @@ int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
   tqDebug("s-task:%s receive task-reset msg from mnode, reset status and ready for data processing", pTask->id.idStr);
 
   // clear flag set during do checkpoint, and open inputQ for all upstream tasks
-  if (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__CK) {
+  if (streamTaskGetStatus(pTask)->state == TASK_STATUS__CK) {
     streamTaskClearCheckInfo(pTask, true);
     streamTaskSetStatusReady(pTask);
   }
@@ -753,3 +909,108 @@ int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
   streamMetaReleaseTask(pMeta, pTask);
   return TSDB_CODE_SUCCESS;
 }
+
+int32_t tqStreamTaskProcessTaskPauseReq(SStreamMeta* pMeta, char* pMsg) {
+  SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)pMsg;
+
+  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
+  if (pTask == NULL) {
+    tqError("vgId:%d process pause req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId,
+            pReq->taskId);
+    // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active
+    return TSDB_CODE_SUCCESS;
+  }
+
+  tqDebug("s-task:%s receive pause msg from mnode", pTask->id.idStr);
+  streamTaskPause(pTask, pMeta);
+
+  SStreamTask* pHistoryTask = NULL;
+  if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
+    pHistoryTask = streamMetaAcquireTask(pMeta, pTask->hTaskInfo.id.streamId, pTask->hTaskInfo.id.taskId);
+    if (pHistoryTask == NULL) {
+      tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%" PRIx64
+              ", it may have been dropped already",
+              pMeta->vgId, pTask->hTaskInfo.id.taskId);
+      streamMetaReleaseTask(pMeta, pTask);
+
+      // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active
+      return TSDB_CODE_SUCCESS;
+    }
+
+    tqDebug("s-task:%s fill-history task handle paused along with related stream task", pHistoryTask->id.idStr);
+
+    streamTaskPause(pHistoryTask, pMeta);
+    streamMetaReleaseTask(pMeta, pHistoryTask);
+  }
+
+  streamMetaReleaseTask(pMeta, pTask);
+  return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t sversion, int8_t igUntreated, bool fromVnode) {
+  SStreamMeta* pMeta = fromVnode ? ((STQ*)handle)->pStreamMeta : handle;
+  int32_t vgId = pMeta->vgId;
+  if (pTask == NULL) {
+    return -1;
+  }
+
+  streamTaskResume(pTask);
+  ETaskStatus status = streamTaskGetStatus(pTask)->state;
+
+  int32_t level = pTask->info.taskLevel;
+  if (level == TASK_LEVEL__SINK) {
+    if (status == TASK_STATUS__UNINIT) {
+    }
+    streamMetaReleaseTask(pMeta, pTask);
+    return 0;
+  }
+
+  if (status == TASK_STATUS__READY || status == TASK_STATUS__SCAN_HISTORY || status == TASK_STATUS__CK) {
+    // no lock needs to secure the access of the version
+    if (igUntreated && level == TASK_LEVEL__SOURCE && !pTask->info.fillHistory) {
+      // discard all the data when the stream task is suspended.
+      walReaderSetSkipToVersion(pTask->exec.pWalReader, sversion);
+      tqDebug("vgId:%d s-task:%s resume to exec, prev paused version:%" PRId64 ", start from vnode ver:%" PRId64
+              ", schedStatus:%d",
+              vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus);
+    } else {  // from the previous paused version and go on
+      tqDebug("vgId:%d s-task:%s resume to exec, from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d",
+              vgId, pTask->id.idStr, pTask->chkInfo.nextProcessVer, sversion, pTask->status.schedStatus);
+    }
+
+    if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory && status == TASK_STATUS__SCAN_HISTORY) {
+      streamStartScanHistoryAsync(pTask, igUntreated);
+    } else if (level == TASK_LEVEL__SOURCE && (streamQueueGetNumOfItems(pTask->inputq.queue) == 0)) {
+      tqScanWalAsync((STQ*)handle, false);
+    } else {
+      streamSchedExec(pTask);
+    }
+  } else if (status == TASK_STATUS__UNINIT) {
+    // todo: fill-history task init ?
+    if (pTask->info.fillHistory == 0) {
+      EStreamTaskEvent event = /*HAS_RELATED_FILLHISTORY_TASK(pTask) ? TASK_EVENT_INIT_STREAM_SCANHIST : */ TASK_EVENT_INIT;
+      streamTaskHandleEvent(pTask->status.pSM, event);
+    }
+  }
+
+  streamMetaReleaseTask(pMeta, pTask);
+  return 0;
+}
+
+int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* msg, bool fromVnode) {
+  SVResumeStreamTaskReq* pReq = (SVResumeStreamTaskReq*)msg;
+  SStreamMeta* pMeta = fromVnode ? ((STQ*)handle)->pStreamMeta : handle;
+  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
+  int32_t code = tqProcessTaskResumeImpl(handle, pTask, sversion, pReq->igUntreated, fromVnode);
+  if (code != 0) {
+    return code;
+  }
+
+  STaskId* pHTaskId = &pTask->hTaskInfo.id;
+  SStreamTask* pHistoryTask = streamMetaAcquireTask(pMeta, pHTaskId->streamId, pHTaskId->taskId);
+  if (pHistoryTask) {
+    code = tqProcessTaskResumeImpl(handle, pHistoryTask, sversion, pReq->igUntreated, fromVnode);
+  }
+
+  return code;
+}
@@ -1990,9 +1990,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
 
   if (SFSNEXTROW_FILESET == state->state) {
   _next_fileset:
-    if (--state->iFileSet < 0) {
-      clearLastFileSet(state);
+    clearLastFileSet(state);
 
+    if (--state->iFileSet < 0) {
       *ppRow = NULL;
       return code;
     } else {
@@ -2862,7 +2862,9 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
 
         taosArraySet(pColArray, iCol, &lastCol);
         int32_t aColIndex = taosArraySearchIdx(aColArray, &lastCol.colVal.cid, compareInt16Val, TD_EQ);
-        taosArrayRemove(aColArray, aColIndex);
+        if (aColIndex >= 0) {
+          taosArrayRemove(aColArray, aColIndex);
+        }
       } else if (!COL_VAL_IS_VALUE(tColVal) && !COL_VAL_IS_VALUE(pColVal) && !setNoneCol) {
         noneCol = iCol;
         setNoneCol = true;
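The second hunk above is a classic guard: an index returned by a search is only valid when the element was actually found. A tiny self-contained illustration of the bug class being fixed; the names here are generic stand-ins, not the tsdb array API.

```c
#include <stdio.h>

#define NOT_FOUND (-1)

/* Linear search stand-in; returns the index of `key` or NOT_FOUND. */
static int search(const int* arr, int n, int key) {
  for (int i = 0; i < n; ++i) {
    if (arr[i] == key) return i;
  }
  return NOT_FOUND;
}

/* Shift-left removal at a known-valid index. */
static void removeAt(int* arr, int* n, int idx) {
  for (int i = idx + 1; i < *n; ++i) arr[i - 1] = arr[i];
  (*n)--;
}

int main(void) {
  int cols[] = {3, 7, 11};
  int n = 3;

  int idx = search(cols, n, 7);
  if (idx >= 0) {  // the guard added by the fix: never remove with a not-found index
    removeAt(cols, &n, idx);
  }
  printf("remaining:%d\n", n);
  return 0;
}
```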
@@ -73,6 +73,7 @@ static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order
 static void resetTableListIndex(SReaderStatus* pStatus);
 static void getMemTableTimeRange(STsdbReader* pReader, int64_t* pMaxKey, int64_t* pMinKey);
 static void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo);
+static int32_t buildFromPreFilesetBuffer(STsdbReader* pReader);
 
 static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
 
@@ -3040,6 +3041,17 @@ static ERetrieveType doReadDataFromSttFiles(STsdbReader* pReader) {
       return TSDB_READ_RETURN;
     }
 
+    if (pReader->status.bProcMemPreFileset) {
+      code = buildFromPreFilesetBuffer(pReader);
+      if (code != TSDB_CODE_SUCCESS) {
+        return code;
+      }
+      if (pResBlock->info.rows > 0) {
+        pReader->status.processingMemPreFileSet = true;
+        return TSDB_READ_RETURN;
+      }
+    }
+
     if (pBlockIter->numOfBlocks > 0) {  // there are data blocks existed.
       return TSDB_READ_CONTINUE;
     } else {  // all blocks in data file are checked, let's check the data in last files
@@ -4297,6 +4309,7 @@ static int32_t buildFromPreFilesetBuffer(STsdbReader* pReader) {
   } else {
     tsdbDebug("finished pre-fileset %d buffer processing. %s", fid, pReader->idStr);
     pStatus->bProcMemPreFileset = false;
+    pStatus->processingMemPreFileSet = false;
     if (pReader->notifyFn) {
       STsdReaderNotifyInfo info = {0};
       info.duration.filesetId = fid;
@@ -4329,7 +4342,7 @@ static int32_t doTsdbNextDataBlockFilesetDelimited(STsdbReader* pReader) {
             pStatus->bProcMemFirstFileset, pReader->idStr);
   if (pStatus->bProcMemPreFileset) {
     if (pBlock->info.rows > 0) {
-      if (pReader->notifyFn) {
+      if (pReader->notifyFn && !pReader->status.processingMemPreFileSet) {
         int32_t fid = pReader->status.pCurrentFileset->fid;
         STsdReaderNotifyInfo info = {0};
         info.duration.filesetId = fid;
@@ -238,6 +238,7 @@ typedef struct SReaderStatus {
   int64_t prevFilesetStartKey;
   int64_t prevFilesetEndKey;
   bool bProcMemFirstFileset;
+  bool processingMemPreFileSet;
   STableUidList procMemUidList;
   STableBlockScanInfo** pProcMemTableIter;
 } SReaderStatus;
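The new `processingMemPreFileSet` flag exists so the fileset notification fires only once while the pre-fileset in-memory rows are drained across several reader calls. A minimal sketch of that one-shot-notify pattern, with generic names rather than the actual reader API:

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  bool draining;   /* rows from the pre-fileset buffer still pending */
  bool inProgress; /* set after the first batch; suppresses re-notification */
} SState;

static void notifyFilesetStart(void) { printf("notify: fileset start\n"); }

/* Returns true while there is still buffered data to hand out. */
static bool nextBatch(SState* s, int* remainingRows) {
  if (!s->draining) return false;

  if (!s->inProgress) {  // notify on the first batch only
    notifyFilesetStart();
    s->inProgress = true;
  }

  if (--(*remainingRows) == 0) {  // buffer drained, reset both flags
    s->draining = false;
    s->inProgress = false;
  }
  return true;
}

int main(void) {
  SState s = {.draining = true, .inProgress = false};
  int rows = 3;
  while (nextBatch(&s, &rows)) {
    printf("batch, rows left:%d\n", rows);
  }
  return 0;
}
```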
@@ -14,11 +14,11 @@
  */
 
 #define _DEFAULT_SOURCE
-#include "tq.h"
 #include "sync.h"
+#include "tq.h"
+#include "tqCommon.h"
 #include "tsdb.h"
 #include "vnd.h"
-#include "tqCommon.h"
 
 #define BATCH_ENABLE 0
 
@@ -411,7 +411,7 @@ static int32_t vnodeSyncEqMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
 }
 
 static int32_t vnodeSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
-  int32_t code = tmsgSendReq(pEpSet, pMsg);
+  int32_t code = tmsgSendSyncReq(pEpSet, pMsg);
   if (code != 0) {
     rpcFreeCont(pMsg->pCont);
     pMsg->pCont = NULL;
@@ -567,29 +567,30 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)
   if (vnodeIsRoleLeader(pVnode)) {
     // start to restore all stream tasks
     if (tsDisableStream) {
-      streamMetaWUnLock(pMeta);
       vInfo("vgId:%d, sync restore finished, not launch stream tasks, since stream tasks are disabled", vgId);
     } else {
-      vInfo("vgId:%d sync restore finished, start to launch stream tasks", pVnode->config.vgId);
-      tqStreamTaskResetStatus(pVnode->pTq->pStreamMeta);
+      vInfo("vgId:%d sync restore finished, start to launch stream task(s)", pVnode->config.vgId);
+      int32_t numOfTasks = 0;
+      tqStreamTaskResetStatus(pMeta, &numOfTasks);
 
-      {
+      if (numOfTasks > 0) {
         if (pMeta->startInfo.taskStarting == 1) {
           pMeta->startInfo.restartCount += 1;
           tqDebug("vgId:%d in start tasks procedure, inc restartCounter by 1, remaining restart:%d", vgId,
                   pMeta->startInfo.restartCount);
-          streamMetaWUnLock(pMeta);
         } else {
           pMeta->startInfo.taskStarting = 1;
           streamMetaWUnLock(pMeta);
           tqStreamTaskStartAsync(pMeta, &pVnode->msgCb, false);
+          return;
         }
       }
     }
   } else {
-    streamMetaWUnLock(pMeta);
     vInfo("vgId:%d, sync restore finished, not launch stream tasks since not leader", vgId);
   }
 
+  streamMetaWUnLock(pMeta);
 }
 
 static void vnodeBecomeFollower(const SSyncFSM *pFsm) {
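`tqStreamTaskResetStatus` now reports the task count through an out-parameter, and `vnodeRestoreFinish` above uses it to skip the start procedure entirely when the vnode hosts no stream tasks. A hedged sketch of that out-parameter convention, with stand-in types rather than the real meta structures:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int32_t taskCount;
} SMeta;

/* Resets every task and tells the caller how many there were, so the caller
 * can branch without a second lookup. Returns 0 on success. */
static int32_t resetStatus(SMeta* pMeta, int32_t* numOfTasks) {
  *numOfTasks = pMeta->taskCount;
  if (*numOfTasks == 0) {
    return 0;  // nothing to reset
  }
  /* ... reset each task here ... */
  return 0;
}

int main(void) {
  SMeta meta = {.taskCount = 2};

  int32_t numOfTasks = 0;
  resetStatus(&meta, &numOfTasks);

  if (numOfTasks > 0) {
    printf("launch start procedure for %d task(s)\n", numOfTasks);
  } else {
    printf("no stream tasks, skip start procedure\n");
  }
  return 0;
}
```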
@@ -1838,7 +1838,7 @@ static STimeWindow doCalculateTimeWindow(int64_t ts, SInterval* pInterval) {
   STimeWindow w = {0};
 
   w.skey = taosTimeTruncate(ts, pInterval);
-  w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+  w.ekey = taosTimeGetIntervalEnd(w.skey, pInterval);
   return w;
 }
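This hunk, and several later ones, replace the hand-rolled `taosTimeAdd(skey, interval, unit, precision) - 1` idiom with a single `taosTimeGetIntervalEnd` helper, centralizing the end-key computation (notably for calendar units such as months, where a fixed millisecond addition is wrong). A sketch of the fixed-duration case only, with a local function standing in for the library call:

```c
#include <inttypes.h>
#include <stdio.h>

/* Fixed-duration stand-in for the interval-end helper: the end key is the
 * last timestamp inside the window, i.e. start + interval - 1. Calendar units
 * ('n' months, 'y' years) need real calendar math and are not handled here. */
static int64_t intervalEnd(int64_t skey, int64_t intervalMs) {
  return skey + intervalMs - 1;
}

int main(void) {
  int64_t skey = 1700000000000;  // window start, ms precision
  int64_t interval = 60 * 1000;  // 1-minute windows

  printf("window [%" PRId64 ", %" PRId64 "]\n", skey, intervalEnd(skey, interval));
  return 0;
}
```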
@@ -1887,31 +1887,17 @@ STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowI
 }
 
 void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order) {
+  int64_t slidingStart = 0;
+  if (pInterval->offset > 0) {
+    slidingStart = taosTimeAdd(tw->skey, -1 * pInterval->offset, pInterval->offsetUnit, pInterval->precision);
+  } else {
+    slidingStart = tw->skey;
+  }
   int32_t factor = GET_FORWARD_DIRECTION_FACTOR(order);
-  if (!IS_CALENDAR_TIME_DURATION(pInterval->slidingUnit)) {
-    tw->skey += pInterval->sliding * factor;
-    tw->ekey = taosTimeAdd(tw->skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
-    return;
-  }
-
-  // convert key to second
-  int64_t key = convertTimePrecision(tw->skey, pInterval->precision, TSDB_TIME_PRECISION_MILLI) / 1000;
-
-  int64_t duration = pInterval->sliding;
-  if (pInterval->slidingUnit == 'y') {
-    duration *= 12;
-  }
-
-  struct tm tm;
-  time_t t = (time_t)key;
-  taosLocalTime(&t, &tm, NULL);
-
-  int mon = (int)(tm.tm_year * 12 + tm.tm_mon + duration * factor);
-  tm.tm_year = mon / 12;
-  tm.tm_mon = mon % 12;
-  tw->skey = convertTimePrecision((int64_t)taosMktime(&tm) * 1000LL, TSDB_TIME_PRECISION_MILLI, pInterval->precision);
-
-  tw->ekey = taosTimeAdd(tw->skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+  slidingStart = taosTimeAdd(slidingStart, factor * pInterval->sliding, pInterval->slidingUnit, pInterval->precision);
+  tw->skey = taosTimeAdd(slidingStart, pInterval->offset, pInterval->offsetUnit, pInterval->precision);
+  int64_t slidingEnd = taosTimeAdd(slidingStart, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+  tw->ekey = taosTimeAdd(slidingEnd, pInterval->offset, pInterval->offsetUnit, pInterval->precision);
 }
 
 bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo) {
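The rewritten `getNextTimeWindow` handles the interval offset uniformly: strip the offset to get back to the un-shifted sliding grid, advance one sliding step with calendar-aware addition, then re-apply the offset to both boundaries. A self-contained sketch for fixed-duration units, where the calendar-aware `taosTimeAdd` collapses to plain arithmetic:

```c
#include <inttypes.h>
#include <stdio.h>

typedef struct {
  int64_t skey;
  int64_t ekey;
} Window;

/* Advance `w` by one sliding step in direction `factor` (+1 forward, -1
 * backward). `offset` shifts the whole grid; `sliding` and `interval` are
 * fixed millisecond durations in this sketch. */
static void nextWindow(Window* w, int64_t interval, int64_t sliding, int64_t offset, int factor) {
  int64_t slidingStart = w->skey - offset;    // back to the un-shifted grid
  slidingStart += (int64_t)factor * sliding;  // one step on that grid
  w->skey = slidingStart + offset;            // re-apply the offset
  w->ekey = (slidingStart + interval - 1) + offset;
}

int main(void) {
  // 10s windows sliding by 5s, grid shifted by 2s
  Window w = {.skey = 2000, .ekey = 11999};
  nextWindow(&w, 10000, 5000, 2000, +1);
  printf("next window [%" PRId64 ", %" PRId64 "]\n", w.skey, w.ekey);  // [7000, 16999]
  return 0;
}
```

Doing the step on the offset-free grid is what keeps shifted windows aligned; sliding the shifted keys directly would accumulate the offset into the grid itself.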
@@ -922,8 +922,8 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan
   pStreamInfo->fillHistoryWindow = *pWindow;
   pStreamInfo->recoverStep = STREAM_RECOVER_STEP__PREPARE2;
 
-  qDebug("%s step 2. set param for stream scanner for scan-history data, verRange:%" PRId64 " - %" PRId64
-         ", window:%" PRId64 " - %" PRId64,
+  qDebug("%s step 2. set param for stream scanner scan wal, verRange:%" PRId64 " - %" PRId64 ", window:%" PRId64
+         " - %" PRId64,
         GET_TASKID(pTaskInfo), pStreamInfo->fillHistoryVer.minVer, pStreamInfo->fillHistoryVer.maxVer, pWindow->skey,
         pWindow->ekey);
   return 0;
@@ -1023,6 +1023,57 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) {
   return 0;
 }
 
+int32_t qResetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) {
+  SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+  SOperatorInfo* pOperator = pTaskInfo->pRoot;
+
+  while (1) {
+    int32_t type = pOperator->operatorType;
+    if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
+        type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL) {
+      SStreamIntervalOperatorInfo* pInfo = pOperator->info;
+      STimeWindowAggSupp* pSup = &pInfo->twAggSup;
+
+      pSup->calTriggerSaved = 0;
+      pSup->deleteMarkSaved = 0;
+      qInfo("reset stream param for interval: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
+
+    } else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
+               type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION ||
+               type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) {
+      SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
+      STimeWindowAggSupp* pSup = &pInfo->twAggSup;
+
+      pSup->calTriggerSaved = 0;
+      pSup->deleteMarkSaved = 0;
+      qInfo("reset stream param for session: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
+
+    } else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) {
+      SStreamStateAggOperatorInfo* pInfo = pOperator->info;
+      STimeWindowAggSupp* pSup = &pInfo->twAggSup;
+
+      pSup->calTriggerSaved = 0;
+      pSup->deleteMarkSaved = 0;
+      qInfo("reset stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
+
+    } else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT) {
+      SStreamEventAggOperatorInfo* pInfo = pOperator->info;
+      STimeWindowAggSupp* pSup = &pInfo->twAggSup;
+
+      pSup->calTriggerSaved = 0;
+      pSup->deleteMarkSaved = 0;
+      qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
+    }
+
+    // iterate operator tree
+    if (pOperator->numOfDownstream != 1 || pOperator->pDownstream[0] == NULL) {
+      return 0;
+    } else {
+      pOperator = pOperator->pDownstream[0];
+    }
+  }
+}
+
 int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo) {
   SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
   const char* id = GET_TASKID(pTaskInfo);
@@ -1078,7 +1129,7 @@ int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo) {
   SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
   STimeWindow* pWindow = &pTaskInfo->streamInfo.fillHistoryWindow;
 
-  qDebug("%s remove scan-history filter window:%" PRId64 "-%" PRId64 ", set new window:%" PRId64 "-%" PRId64,
+  qDebug("%s remove timeWindow filter:%" PRId64 "-%" PRId64 ", set new window:%" PRId64 "-%" PRId64,
         GET_TASKID(pTaskInfo), pWindow->skey, pWindow->ekey, INT64_MIN, INT64_MAX);
 
   pWindow->skey = INT64_MIN;
@@ -449,7 +449,7 @@ STimeWindow getAlignQueryTimeWindow(const SInterval* pInterval, int64_t key) {
    * if the realSkey > INT64_MAX - pInterval->interval, the query duration between
    * realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges.
    */
-  win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+  win.ekey = taosTimeGetIntervalEnd(win.skey, pInterval);
   if (win.ekey < win.skey) {
     win.ekey = INT64_MAX;
   }
@@ -427,6 +427,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
   taosMemoryFreeClear(param);
 }
 
+#ifdef BUILD_NO_CALL
 static bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) {
   for (int32_t i = 0; i < numOfCols; i++) {
     if (fmIsUserDefinedFunc(pFCtx[i].functionId) || !fmIsInvertible(pFCtx[i].functionId)) {
@@ -435,6 +436,7 @@ static bool allInvertible(SqlFunctionCtx* pFCtx, int32_t numOfCols) {
   }
   return true;
 }
+#endif
 
 void reloadFromDownStream(SOperatorInfo* downstream, SStreamIntervalOperatorInfo* pInfo) {
   SStateStore* pAPI = &downstream->pTaskInfo->storageAPI.stateStore;
@@ -3846,6 +3848,7 @@ _error:
   return NULL;
 }
 
+#ifdef BUILD_NO_CALL
 static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type) {
   for (int i = 0; i < num; i++) {
     if (type == STREAM_INVERT) {
@@ -3855,6 +3858,7 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type
     }
   }
 }
+#endif
 
 static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
   SStreamIntervalOperatorInfo* pInfo = pOperator->info;
@@ -3947,9 +3951,11 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
     // caller. Note that all the time window are not close till now.
     // the pDataBlock are always the same one, no need to call this again
     setInputDataBlock(pSup, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
+#ifdef BUILD_NO_CALL
     if (pInfo->invertible) {
      setInverFunction(pSup->pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.type);
     }
+#endif
 
     doStreamIntervalAggImpl(pOperator, pBlock, pBlock->info.id.groupId, pInfo->pUpdatedMap);
     pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
@@ -450,7 +450,7 @@ int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBl
       TSKEY next = primaryKeys[startPos];
       if (pInterval->intervalUnit == 'n' || pInterval->intervalUnit == 'y') {
         pNext->skey = taosTimeTruncate(next, pInterval);
-        pNext->ekey = taosTimeAdd(pNext->skey, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+        pNext->ekey = taosTimeGetIntervalEnd(pNext->skey, pInterval);
       } else {
         pNext->ekey += ((next - pNext->ekey + pInterval->sliding - 1) / pInterval->sliding) * pInterval->sliding;
         pNext->skey = pNext->ekey - pInterval->interval + 1;
@@ -459,7 +459,7 @@ int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBl
       TSKEY next = primaryKeys[startPos];
       if (pInterval->intervalUnit == 'n' || pInterval->intervalUnit == 'y') {
         pNext->skey = taosTimeTruncate(next, pInterval);
-        pNext->ekey = taosTimeAdd(pNext->skey, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+        pNext->ekey = taosTimeGetIntervalEnd(pNext->skey, pInterval);
       } else {
         pNext->skey -= ((pNext->skey - next + pInterval->sliding - 1) / pInterval->sliding) * pInterval->sliding;
         pNext->ekey = pNext->skey + pInterval->interval - 1;
@@ -1079,16 +1079,6 @@ static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) {
   return (rows == 0) ? NULL : pBlock;
 }
 
-static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type) {
-  for (int i = 0; i < num; i++) {
-    if (type == STREAM_INVERT) {
-      fmSetInvertFunc(pCtx[i].functionId, &(pCtx[i].fpSet));
-    } else if (type == STREAM_NORMAL) {
-      fmSetNormalFunc(pCtx[i].functionId, &(pCtx[i].fpSet));
-    }
-  }
-}
-
 static void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SExprSupp* pSup, int32_t numOfOutput) {
   SResultRow* pResult = getResultRowByPos(pResultBuf, p1, false);
   if (NULL == pResult) {
@ -158,4 +158,32 @@ TEST(testCase, timewindow_gen) {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TEST(testCase, timewindow_natural) {
|
||||||
|
osSetTimezone("CST");
|
||||||
|
|
||||||
|
int32_t precision = TSDB_TIME_PRECISION_MILLI;
|
||||||
|
|
||||||
|
SInterval interval2 = createInterval(17, 17, 13392000000, 'n', 'n', 0, precision);
|
||||||
|
int64_t key = 1648970865984;
|
||||||
|
STimeWindow w0 = getAlignQueryTimeWindow(&interval2, key);
|
||||||
|
printTimeWindow(&w0, precision, key);
|
||||||
|
ASSERT_GE(w0.ekey, key);
|
||||||
|
|
||||||
|
int64_t key1 = 1633446027072;
|
||||||
|
STimeWindow w1 = {0};
|
||||||
|
getInitialStartTimeWindow(&interval2, key1, &w1, true);
|
||||||
|
printTimeWindow(&w1, precision, key1);
|
||||||
|
STimeWindow w3 = getAlignQueryTimeWindow(&interval2, key1);
|
||||||
|
printf("%ld win %ld, %ld\n", key1, w3.skey, w3.ekey);
|
||||||
|
|
||||||
|
int64_t key2 = 1648758398208;
|
||||||
|
STimeWindow w2 = {0};
|
||||||
|
getInitialStartTimeWindow(&interval2, key2, &w2, true);
|
||||||
|
printTimeWindow(&w2, precision, key2);
|
||||||
|
STimeWindow w4 = getAlignQueryTimeWindow(&interval2, key2);
|
||||||
|
printf("%ld win %ld, %ld\n", key2, w3.skey, w3.ekey);
|
||||||
|
|
||||||
|
ASSERT_EQ(w3.skey, w4.skey);
|
||||||
|
ASSERT_EQ(w3.ekey, w4.ekey);
|
||||||
|
}
|
||||||
#pragma GCC diagnostic pop
|
#pragma GCC diagnostic pop
|
|
@ -40,7 +40,9 @@ typedef struct SBuiltinFuncDefinition {
|
||||||
FExecProcess processFunc;
|
FExecProcess processFunc;
|
||||||
FScalarExecProcess sprocessFunc;
|
FScalarExecProcess sprocessFunc;
|
||||||
FExecFinalize finalizeFunc;
|
FExecFinalize finalizeFunc;
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
FExecProcess invertFunc;
|
FExecProcess invertFunc;
|
||||||
|
#endif
|
||||||
FExecCombine combineFunc;
|
FExecCombine combineFunc;
|
||||||
const char* pPartialFunc;
|
const char* pPartialFunc;
|
||||||
const char* pMergeFunc;
|
const char* pMergeFunc;
|
||||||
|
|
|
@ -59,12 +59,19 @@ int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
|
||||||
EFuncDataRequired countDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
|
EFuncDataRequired countDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
|
||||||
bool getCountFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
|
bool getCountFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
|
||||||
int32_t countFunction(SqlFunctionCtx* pCtx);
|
int32_t countFunction(SqlFunctionCtx* pCtx);
|
||||||
|
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
int32_t countInvertFunction(SqlFunctionCtx* pCtx);
|
int32_t countInvertFunction(SqlFunctionCtx* pCtx);
|
||||||
|
#endif
|
||||||
|
|
||||||
EFuncDataRequired statisDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
|
EFuncDataRequired statisDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
|
||||||
bool getSumFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
|
bool getSumFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
|
||||||
int32_t sumFunction(SqlFunctionCtx* pCtx);
|
int32_t sumFunction(SqlFunctionCtx* pCtx);
|
||||||
|
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
int32_t sumInvertFunction(SqlFunctionCtx* pCtx);
|
int32_t sumInvertFunction(SqlFunctionCtx* pCtx);
|
||||||
|
#endif
|
||||||
|
|
||||||
int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
|
int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
|
||||||
|
|
||||||
bool minmaxFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo);
|
bool minmaxFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo);
|
||||||
|
@ -81,7 +88,11 @@ int32_t avgFunction(SqlFunctionCtx* pCtx);
|
||||||
int32_t avgFunctionMerge(SqlFunctionCtx* pCtx);
|
int32_t avgFunctionMerge(SqlFunctionCtx* pCtx);
|
||||||
int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
|
int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
|
||||||
int32_t avgPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
|
int32_t avgPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
|
||||||
|
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
int32_t avgInvertFunction(SqlFunctionCtx* pCtx);
|
int32_t avgInvertFunction(SqlFunctionCtx* pCtx);
|
||||||
|
#endif
|
||||||
|
|
||||||
int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
|
int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
|
||||||
int32_t getAvgInfoSize();
|
int32_t getAvgInfoSize();
|
||||||
|
|
||||||
|
@ -91,7 +102,11 @@ int32_t stddevFunction(SqlFunctionCtx* pCtx);
|
||||||
int32_t stddevFunctionMerge(SqlFunctionCtx* pCtx);
|
int32_t stddevFunctionMerge(SqlFunctionCtx* pCtx);
|
||||||
int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
|
int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
|
||||||
int32_t stddevPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
|
int32_t stddevPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
|
||||||
|
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
int32_t stddevInvertFunction(SqlFunctionCtx* pCtx);
|
int32_t stddevInvertFunction(SqlFunctionCtx* pCtx);
|
||||||
|
#endif
|
||||||
|
|
||||||
int32_t stddevCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
|
int32_t stddevCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
|
||||||
int32_t getStddevInfoSize();
|
int32_t getStddevInfoSize();
|
||||||
|
|
||||||
|
@ -99,7 +114,6 @@ bool getLeastSQRFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
|
||||||
bool leastSQRFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo);
|
bool leastSQRFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo);
|
||||||
int32_t leastSQRFunction(SqlFunctionCtx* pCtx);
|
int32_t leastSQRFunction(SqlFunctionCtx* pCtx);
|
||||||
int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
|
int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
|
||||||
int32_t leastSQRInvertFunction(SqlFunctionCtx* pCtx);
|
|
||||||
int32_t leastSQRCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
|
int32_t leastSQRCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
|
||||||
|
|
||||||
bool getPercentileFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
|
bool getPercentileFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
|
||||||
|
|
|
@ -2368,7 +2368,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.processFunc = countFunction,
|
.processFunc = countFunction,
|
||||||
.sprocessFunc = countScalarFunction,
|
.sprocessFunc = countScalarFunction,
|
||||||
.finalizeFunc = functionFinalize,
|
.finalizeFunc = functionFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = countInvertFunction,
|
.invertFunc = countInvertFunction,
|
||||||
|
#endif
|
||||||
.combineFunc = combineFunction,
|
.combineFunc = combineFunction,
|
||||||
.pPartialFunc = "count",
|
.pPartialFunc = "count",
|
||||||
.pMergeFunc = "sum"
|
.pMergeFunc = "sum"
|
||||||
|
@ -2384,7 +2386,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.processFunc = sumFunction,
|
.processFunc = sumFunction,
|
||||||
.sprocessFunc = sumScalarFunction,
|
.sprocessFunc = sumScalarFunction,
|
||||||
.finalizeFunc = functionFinalize,
|
.finalizeFunc = functionFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = sumInvertFunction,
|
.invertFunc = sumInvertFunction,
|
||||||
|
#endif
|
||||||
.combineFunc = sumCombine,
|
.combineFunc = sumCombine,
|
||||||
.pPartialFunc = "sum",
|
.pPartialFunc = "sum",
|
||||||
.pMergeFunc = "sum"
|
.pMergeFunc = "sum"
|
||||||
|
@ -2429,7 +2433,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.processFunc = stddevFunction,
|
.processFunc = stddevFunction,
|
||||||
.sprocessFunc = stddevScalarFunction,
|
.sprocessFunc = stddevScalarFunction,
|
||||||
.finalizeFunc = stddevFinalize,
|
.finalizeFunc = stddevFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = stddevInvertFunction,
|
.invertFunc = stddevInvertFunction,
|
||||||
|
#endif
|
||||||
.combineFunc = stddevCombine,
|
.combineFunc = stddevCombine,
|
||||||
.pPartialFunc = "_stddev_partial",
|
.pPartialFunc = "_stddev_partial",
|
||||||
.pMergeFunc = "_stddev_merge"
|
.pMergeFunc = "_stddev_merge"
|
||||||
|
@ -2443,7 +2449,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = stddevFunctionSetup,
|
.initFunc = stddevFunctionSetup,
|
||||||
.processFunc = stddevFunction,
|
.processFunc = stddevFunction,
|
||||||
.finalizeFunc = stddevPartialFinalize,
|
.finalizeFunc = stddevPartialFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = stddevInvertFunction,
|
.invertFunc = stddevInvertFunction,
|
||||||
|
#endif
|
||||||
.combineFunc = stddevCombine,
|
.combineFunc = stddevCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2455,7 +2463,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = stddevFunctionSetup,
|
.initFunc = stddevFunctionSetup,
|
||||||
.processFunc = stddevFunctionMerge,
|
.processFunc = stddevFunctionMerge,
|
||||||
.finalizeFunc = stddevFinalize,
|
.finalizeFunc = stddevFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = stddevInvertFunction,
|
.invertFunc = stddevInvertFunction,
|
||||||
|
#endif
|
||||||
.combineFunc = stddevCombine,
|
.combineFunc = stddevCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2468,7 +2478,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.processFunc = leastSQRFunction,
|
.processFunc = leastSQRFunction,
|
||||||
.sprocessFunc = leastSQRScalarFunction,
|
.sprocessFunc = leastSQRScalarFunction,
|
||||||
.finalizeFunc = leastSQRFinalize,
|
.finalizeFunc = leastSQRFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = leastSQRCombine,
|
.combineFunc = leastSQRCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2482,7 +2494,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.processFunc = avgFunction,
|
.processFunc = avgFunction,
|
||||||
.sprocessFunc = avgScalarFunction,
|
.sprocessFunc = avgScalarFunction,
|
||||||
.finalizeFunc = avgFinalize,
|
.finalizeFunc = avgFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = avgInvertFunction,
|
.invertFunc = avgInvertFunction,
|
||||||
|
#endif
|
||||||
.combineFunc = avgCombine,
|
.combineFunc = avgCombine,
|
||||||
.pPartialFunc = "_avg_partial",
|
.pPartialFunc = "_avg_partial",
|
||||||
.pMergeFunc = "_avg_merge"
|
.pMergeFunc = "_avg_merge"
|
||||||
|
@ -2497,7 +2511,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = avgFunctionSetup,
|
.initFunc = avgFunctionSetup,
|
||||||
.processFunc = avgFunction,
|
.processFunc = avgFunction,
|
||||||
.finalizeFunc = avgPartialFinalize,
|
.finalizeFunc = avgPartialFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = avgInvertFunction,
|
.invertFunc = avgInvertFunction,
|
||||||
|
#endif
|
||||||
.combineFunc = avgCombine,
|
.combineFunc = avgCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2509,7 +2525,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = avgFunctionSetup,
|
.initFunc = avgFunctionSetup,
|
||||||
.processFunc = avgFunctionMerge,
|
.processFunc = avgFunctionMerge,
|
||||||
.finalizeFunc = avgFinalize,
|
.finalizeFunc = avgFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = avgInvertFunction,
|
.invertFunc = avgInvertFunction,
|
||||||
|
#endif
|
||||||
.combineFunc = avgCombine,
|
.combineFunc = avgCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2523,7 +2541,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.processFunc = percentileFunction,
|
.processFunc = percentileFunction,
|
||||||
.sprocessFunc = percentileScalarFunction,
|
.sprocessFunc = percentileScalarFunction,
|
||||||
.finalizeFunc = percentileFinalize,
|
.finalizeFunc = percentileFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = NULL,
|
.combineFunc = NULL,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2536,7 +2556,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.processFunc = apercentileFunction,
|
.processFunc = apercentileFunction,
|
||||||
.sprocessFunc = apercentileScalarFunction,
|
.sprocessFunc = apercentileScalarFunction,
|
||||||
.finalizeFunc = apercentileFinalize,
|
.finalizeFunc = apercentileFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = apercentileCombine,
|
.combineFunc = apercentileCombine,
|
||||||
.pPartialFunc = "_apercentile_partial",
|
.pPartialFunc = "_apercentile_partial",
|
||||||
.pMergeFunc = "_apercentile_merge",
|
.pMergeFunc = "_apercentile_merge",
|
||||||
|
@ -2551,7 +2573,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = apercentileFunctionSetup,
|
.initFunc = apercentileFunctionSetup,
|
||||||
.processFunc = apercentileFunction,
|
.processFunc = apercentileFunction,
|
||||||
.finalizeFunc = apercentilePartialFinalize,
|
.finalizeFunc = apercentilePartialFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = apercentileCombine,
|
.combineFunc = apercentileCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2563,7 +2587,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = apercentileFunctionSetup,
|
.initFunc = apercentileFunctionSetup,
|
||||||
.processFunc = apercentileFunctionMerge,
|
.processFunc = apercentileFunctionMerge,
|
||||||
.finalizeFunc = apercentileFinalize,
|
.finalizeFunc = apercentileFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = apercentileCombine,
|
.combineFunc = apercentileCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2609,7 +2635,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.processFunc = spreadFunction,
|
.processFunc = spreadFunction,
|
||||||
.sprocessFunc = spreadScalarFunction,
|
.sprocessFunc = spreadScalarFunction,
|
||||||
.finalizeFunc = spreadFinalize,
|
.finalizeFunc = spreadFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = spreadCombine,
|
.combineFunc = spreadCombine,
|
||||||
.pPartialFunc = "_spread_partial",
|
.pPartialFunc = "_spread_partial",
|
||||||
.pMergeFunc = "_spread_merge"
|
.pMergeFunc = "_spread_merge"
|
||||||
|
@ -2624,7 +2652,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = spreadFunctionSetup,
|
.initFunc = spreadFunctionSetup,
|
||||||
.processFunc = spreadFunction,
|
.processFunc = spreadFunction,
|
||||||
.finalizeFunc = spreadPartialFinalize,
|
.finalizeFunc = spreadPartialFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = spreadCombine,
|
.combineFunc = spreadCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2637,7 +2667,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = spreadFunctionSetup,
|
.initFunc = spreadFunctionSetup,
|
||||||
.processFunc = spreadFunctionMerge,
|
.processFunc = spreadFunctionMerge,
|
||||||
.finalizeFunc = spreadFinalize,
|
.finalizeFunc = spreadFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = spreadCombine,
|
.combineFunc = spreadCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2651,7 +2683,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = elapsedFunctionSetup,
|
.initFunc = elapsedFunctionSetup,
|
||||||
.processFunc = elapsedFunction,
|
.processFunc = elapsedFunction,
|
||||||
.finalizeFunc = elapsedFinalize,
|
.finalizeFunc = elapsedFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = elapsedCombine,
|
.combineFunc = elapsedCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2664,7 +2698,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = elapsedFunctionSetup,
|
.initFunc = elapsedFunctionSetup,
|
||||||
.processFunc = elapsedFunction,
|
.processFunc = elapsedFunction,
|
||||||
.finalizeFunc = elapsedPartialFinalize,
|
.finalizeFunc = elapsedPartialFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = elapsedCombine,
|
.combineFunc = elapsedCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2677,7 +2713,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = elapsedFunctionSetup,
|
.initFunc = elapsedFunctionSetup,
|
||||||
.processFunc = elapsedFunctionMerge,
|
.processFunc = elapsedFunctionMerge,
|
||||||
.finalizeFunc = elapsedFinalize,
|
.finalizeFunc = elapsedFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = elapsedCombine,
|
.combineFunc = elapsedCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2907,7 +2945,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.processFunc = histogramFunction,
|
.processFunc = histogramFunction,
|
||||||
.sprocessFunc = histogramScalarFunction,
|
.sprocessFunc = histogramScalarFunction,
|
||||||
.finalizeFunc = histogramFinalize,
|
.finalizeFunc = histogramFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = histogramCombine,
|
.combineFunc = histogramCombine,
|
||||||
.pPartialFunc = "_histogram_partial",
|
.pPartialFunc = "_histogram_partial",
|
||||||
.pMergeFunc = "_histogram_merge",
|
.pMergeFunc = "_histogram_merge",
|
||||||
|
@ -2921,7 +2961,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = histogramFunctionSetup,
|
.initFunc = histogramFunctionSetup,
|
||||||
.processFunc = histogramFunctionPartial,
|
.processFunc = histogramFunctionPartial,
|
||||||
.finalizeFunc = histogramPartialFinalize,
|
.finalizeFunc = histogramPartialFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = histogramCombine,
|
.combineFunc = histogramCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2933,7 +2975,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = functionSetup,
|
.initFunc = functionSetup,
|
||||||
.processFunc = histogramFunctionMerge,
|
.processFunc = histogramFunctionMerge,
|
||||||
.finalizeFunc = histogramFinalize,
|
.finalizeFunc = histogramFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = histogramCombine,
|
.combineFunc = histogramCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2946,7 +2990,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.processFunc = hllFunction,
|
.processFunc = hllFunction,
|
||||||
.sprocessFunc = hllScalarFunction,
|
.sprocessFunc = hllScalarFunction,
|
||||||
.finalizeFunc = hllFinalize,
|
.finalizeFunc = hllFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = hllCombine,
|
.combineFunc = hllCombine,
|
||||||
.pPartialFunc = "_hyperloglog_partial",
|
.pPartialFunc = "_hyperloglog_partial",
|
||||||
.pMergeFunc = "_hyperloglog_merge"
|
.pMergeFunc = "_hyperloglog_merge"
|
||||||
|
@ -2960,7 +3006,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = functionSetup,
|
.initFunc = functionSetup,
|
||||||
.processFunc = hllFunction,
|
.processFunc = hllFunction,
|
||||||
.finalizeFunc = hllPartialFinalize,
|
.finalizeFunc = hllPartialFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = hllCombine,
|
.combineFunc = hllCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@ -2972,7 +3020,9 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
||||||
.initFunc = functionSetup,
|
.initFunc = functionSetup,
|
||||||
.processFunc = hllFunctionMerge,
|
.processFunc = hllFunctionMerge,
|
||||||
.finalizeFunc = hllFinalize,
|
.finalizeFunc = hllFinalize,
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
.invertFunc = NULL,
|
.invertFunc = NULL,
|
||||||
|
#endif
|
||||||
.combineFunc = hllCombine,
|
.combineFunc = hllCombine,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
|
@ -549,6 +549,7 @@ int32_t countFunction(SqlFunctionCtx* pCtx) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
int32_t countInvertFunction(SqlFunctionCtx* pCtx) {
|
int32_t countInvertFunction(SqlFunctionCtx* pCtx) {
|
||||||
int64_t numOfElem = getNumOfElems(pCtx);
|
int64_t numOfElem = getNumOfElems(pCtx);
|
||||||
|
|
||||||
|
@ -559,6 +560,7 @@ int32_t countInvertFunction(SqlFunctionCtx* pCtx) {
|
||||||
SET_VAL(pResInfo, *((int64_t*)buf), 1);
|
SET_VAL(pResInfo, *((int64_t*)buf), 1);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
|
int32_t combineFunction(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
|
||||||
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
|
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
|
||||||
|
@ -642,6 +644,7 @@ _sum_over:
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
int32_t sumInvertFunction(SqlFunctionCtx* pCtx) {
|
int32_t sumInvertFunction(SqlFunctionCtx* pCtx) {
|
||||||
int32_t numOfElem = 0;
|
int32_t numOfElem = 0;
|
||||||
|
|
||||||
|
@ -699,6 +702,7 @@ int32_t sumInvertFunction(SqlFunctionCtx* pCtx) {
|
||||||
SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
|
SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
|
int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
|
||||||
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
|
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
|
||||||
|
@ -1230,6 +1234,7 @@ int32_t stddevFunctionMerge(SqlFunctionCtx* pCtx) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
int32_t stddevInvertFunction(SqlFunctionCtx* pCtx) {
|
int32_t stddevInvertFunction(SqlFunctionCtx* pCtx) {
|
||||||
int32_t numOfElem = 0;
|
int32_t numOfElem = 0;
|
||||||
|
|
||||||
|
@ -1294,6 +1299,7 @@ int32_t stddevInvertFunction(SqlFunctionCtx* pCtx) {
|
||||||
SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
|
SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
int32_t stddevFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
||||||
SInputColumnInfoData* pInput = &pCtx->input;
|
SInputColumnInfoData* pInput = &pCtx->input;
|
||||||
|
@ -1578,11 +1584,6 @@ int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
||||||
return pResInfo->numOfRes;
|
return pResInfo->numOfRes;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t leastSQRInvertFunction(SqlFunctionCtx* pCtx) {
|
|
||||||
// TODO
|
|
||||||
return TSDB_CODE_SUCCESS;
|
|
||||||
}
|
|
||||||
|
|
||||||
int32_t leastSQRCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
|
int32_t leastSQRCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
|
||||||
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
|
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
|
||||||
SLeastSQRInfo* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
|
SLeastSQRInfo* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
|
||||||
|
|
|
@ -724,6 +724,7 @@ int32_t avgFunctionMerge(SqlFunctionCtx* pCtx) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
int32_t avgInvertFunction(SqlFunctionCtx* pCtx) {
|
int32_t avgInvertFunction(SqlFunctionCtx* pCtx) {
|
||||||
int32_t numOfElem = 0;
|
int32_t numOfElem = 0;
|
||||||
|
|
||||||
|
@ -786,6 +787,7 @@ int32_t avgInvertFunction(SqlFunctionCtx* pCtx) {
|
||||||
SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
|
SET_VAL(GET_RES_INFO(pCtx), numOfElem, 1);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
|
int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
|
||||||
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
|
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
|
||||||
|
|
|
@ -281,6 +281,7 @@ void fmFuncMgtDestroy() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef BUILD_NO_CALL
|
||||||
int32_t fmSetInvertFunc(int32_t funcId, SFuncExecFuncs* pFpSet) {
|
int32_t fmSetInvertFunc(int32_t funcId, SFuncExecFuncs* pFpSet) {
|
||||||
if (fmIsUserDefinedFunc(funcId) || funcId < 0 || funcId >= funcMgtBuiltinsNum) {
|
if (fmIsUserDefinedFunc(funcId) || funcId < 0 || funcId >= funcMgtBuiltinsNum) {
|
||||||
return TSDB_CODE_FAILED;
|
return TSDB_CODE_FAILED;
|
||||||
|
@ -314,6 +315,7 @@ bool fmIsInvertible(int32_t funcId) {
|
||||||
}
|
}
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
// function has same input/output type
|
// function has same input/output type
|
||||||
bool fmIsSameInOutType(int32_t funcId) {
|
bool fmIsSameInOutType(int32_t funcId) {
|
||||||
|
|
|
@ -371,6 +371,18 @@ SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken*
|
||||||
return (SNode*)val;
|
return (SNode*)val;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool hasHint(SNodeList* pHintList, EHintOption hint) {
|
||||||
|
if (!pHintList) return false;
|
||||||
|
SNode* pNode;
|
||||||
|
FOREACH(pNode, pHintList) {
|
||||||
|
SHintNode* pHint = (SHintNode*)pNode;
|
||||||
|
if (pHint->option == hint) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
bool addHintNodeToList(SAstCreateContext* pCxt, SNodeList** ppHintList, EHintOption opt, SToken* paramList,
|
bool addHintNodeToList(SAstCreateContext* pCxt, SNodeList** ppHintList, EHintOption opt, SToken* paramList,
|
||||||
int32_t paramNum) {
|
int32_t paramNum) {
|
||||||
void* value = NULL;
|
void* value = NULL;
|
||||||
|
@ -384,6 +396,10 @@ bool addHintNodeToList(SAstCreateContext* pCxt, SNodeList** ppHintList, EHintOpt
|
||||||
}
|
}
|
||||||
case HINT_SORT_FOR_GROUP:
|
case HINT_SORT_FOR_GROUP:
|
||||||
if (paramNum > 0) return true;
|
if (paramNum > 0) return true;
|
||||||
|
if (hasHint(*ppHintList, HINT_PARTITION_FIRST)) return true;
|
||||||
|
break;
|
||||||
|
case HINT_PARTITION_FIRST:
|
||||||
|
if (paramNum > 0 || hasHint(*ppHintList, HINT_SORT_FOR_GROUP)) return true;
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
return true;
|
return true;
|
||||||
|
@ -455,6 +471,14 @@ SNodeList* createHintNodeList(SAstCreateContext* pCxt, const SToken* pLiteral) {
|
||||||
}
|
}
|
||||||
opt = HINT_SORT_FOR_GROUP;
|
opt = HINT_SORT_FOR_GROUP;
|
||||||
break;
|
break;
|
||||||
|
case TK_PARTITION_FIRST:
|
||||||
|
lastComma = false;
|
||||||
|
if (0 != opt || inParamList) {
|
||||||
|
quit = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
opt = HINT_PARTITION_FIRST;
|
||||||
|
break;
|
||||||
case TK_NK_LP:
|
case TK_NK_LP:
|
||||||
lastComma = false;
|
lastComma = false;
|
||||||
if (0 == opt || inParamList) {
|
if (0 == opt || inParamList) {
|
||||||
|
|
|
@ -170,6 +170,7 @@ static SKeyword keywordTable[] = {
|
||||||
{"PAGES", TK_PAGES},
|
{"PAGES", TK_PAGES},
|
||||||
{"PAGESIZE", TK_PAGESIZE},
|
{"PAGESIZE", TK_PAGESIZE},
|
||||||
{"PARTITION", TK_PARTITION},
|
{"PARTITION", TK_PARTITION},
|
||||||
|
{"PARTITION_FIRST", TK_PARTITION_FIRST},
|
||||||
{"PASS", TK_PASS},
|
{"PASS", TK_PASS},
|
||||||
{"PORT", TK_PORT},
|
{"PORT", TK_PORT},
|
||||||
{"PPS", TK_PPS},
|
{"PPS", TK_PPS},
|
||||||
|
|
|
@ -3158,7 +3158,11 @@ int32_t translateTable(STranslateContext* pCxt, SNode** pTable) {
|
||||||
((SSelectStmt*)pTempTable->pSubquery)->isEmptyResult && isSelectStmt(pCxt->pCurrStmt)) {
|
((SSelectStmt*)pTempTable->pSubquery)->isEmptyResult && isSelectStmt(pCxt->pCurrStmt)) {
|
||||||
((SSelectStmt*)pCxt->pCurrStmt)->isEmptyResult = true;
|
((SSelectStmt*)pCxt->pCurrStmt)->isEmptyResult = true;
|
||||||
}
|
}
|
||||||
|
if (QUERY_NODE_SELECT_STMT == nodeType(pTempTable->pSubquery) && isSelectStmt(pCxt->pCurrStmt)) {
|
||||||
|
SSelectStmt* pSubStmt = (SSelectStmt*)pTempTable->pSubquery;
|
||||||
|
SSelectStmt* pCurrSmt = (SSelectStmt*)(pCxt->pCurrStmt);
|
||||||
|
pCurrSmt->timeLineResMode = pSubStmt->timeLineResMode;
|
||||||
|
}
|
||||||
pTempTable->table.precision = getStmtPrecision(pTempTable->pSubquery);
|
pTempTable->table.precision = getStmtPrecision(pTempTable->pSubquery);
|
||||||
pTempTable->table.singleTable = stmtIsSingleTable(pTempTable->pSubquery);
|
pTempTable->table.singleTable = stmtIsSingleTable(pTempTable->pSubquery);
|
||||||
code = addNamespace(pCxt, pTempTable);
|
code = addNamespace(pCxt, pTempTable);
|
||||||
|
|
|
@ -47,6 +47,7 @@ int32_t validateQueryPlan(SPlanContext* pCxt, SQueryPlan* pPlan);
|
||||||
|
|
||||||
bool getBatchScanOptionFromHint(SNodeList* pList);
|
bool getBatchScanOptionFromHint(SNodeList* pList);
|
||||||
bool getSortForGroupOptHint(SNodeList* pList);
|
bool getSortForGroupOptHint(SNodeList* pList);
|
||||||
|
bool getOptHint(SNodeList* pList, EHintOption hint);
|
||||||
SLogicNode* getLogicNodeRootNode(SLogicNode* pCurr);
|
SLogicNode* getLogicNodeRootNode(SLogicNode* pCurr);
|
||||||
int32_t collectTableAliasFromNodes(SNode* pNode, SSHashObj** ppRes);
|
int32_t collectTableAliasFromNodes(SNode* pNode, SSHashObj** ppRes);
|
||||||
bool isPartTableAgg(SAggLogicNode* pAgg);
|
bool isPartTableAgg(SAggLogicNode* pAgg);
|
||||||
|
|
|
@ -3997,7 +3997,8 @@ static int32_t partitionColsOpt(SOptimizeContext* pCxt, SLogicSubplan* pLogicSub
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return code;
|
return code;
|
||||||
} else if (pNode->node.pParent && nodeType(pNode->node.pParent) == QUERY_NODE_LOGIC_PLAN_AGG) {
|
} else if (pNode->node.pParent && nodeType(pNode->node.pParent) == QUERY_NODE_LOGIC_PLAN_AGG &&
|
||||||
|
!getOptHint(pRootNode->pHint, HINT_PARTITION_FIRST)) {
|
||||||
// Check if we can delete partition node
|
// Check if we can delete partition node
|
||||||
SAggLogicNode* pAgg = (SAggLogicNode*)pNode->node.pParent;
|
SAggLogicNode* pAgg = (SAggLogicNode*)pNode->node.pParent;
|
||||||
FOREACH(node, pNode->pPartitionKeys) {
|
FOREACH(node, pNode->pPartitionKeys) {
|
||||||
|
|
|
@ -432,6 +432,7 @@ bool getBatchScanOptionFromHint(SNodeList* pList) {
|
||||||
}
|
}
|
||||||
|
|
||||||
bool getSortForGroupOptHint(SNodeList* pList) {
|
bool getSortForGroupOptHint(SNodeList* pList) {
|
||||||
|
if (!pList) return false;
|
||||||
SNode* pNode;
|
SNode* pNode;
|
||||||
FOREACH(pNode, pList) {
|
FOREACH(pNode, pList) {
|
||||||
SHintNode* pHint = (SHintNode*)pNode;
|
SHintNode* pHint = (SHintNode*)pNode;
|
||||||
|
@ -442,6 +443,18 @@ bool getSortForGroupOptHint(SNodeList* pList) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool getOptHint(SNodeList* pList, EHintOption hint) {
|
||||||
|
if (!pList) return false;
|
||||||
|
SNode* pNode;
|
||||||
|
FOREACH(pNode, pList) {
|
||||||
|
SHintNode* pHint = (SHintNode*)pNode;
|
||||||
|
if (pHint->option == hint) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
int32_t collectTableAliasFromNodes(SNode* pNode, SSHashObj** ppRes) {
|
int32_t collectTableAliasFromNodes(SNode* pNode, SSHashObj** ppRes) {
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
SLogicNode* pCurr = (SLogicNode*)pNode;
|
SLogicNode* pCurr = (SLogicNode*)pNode;
|
||||||
|
|
|
@ -92,6 +92,9 @@ extern int32_t streamBackendId;
|
||||||
extern int32_t streamBackendCfWrapperId;
|
extern int32_t streamBackendCfWrapperId;
|
||||||
extern int32_t taskDbWrapperId;
|
extern int32_t taskDbWrapperId;
|
||||||
|
|
||||||
|
int32_t streamTimerInit();
|
||||||
|
void streamTimerCleanUp();
|
||||||
|
|
||||||
void streamRetryDispatchData(SStreamTask* pTask, int64_t waitDuration);
|
void streamRetryDispatchData(SStreamTask* pTask, int64_t waitDuration);
|
||||||
int32_t streamDispatchStreamBlock(SStreamTask* pTask);
|
int32_t streamDispatchStreamBlock(SStreamTask* pTask);
|
||||||
void destroyDispatchMsg(SStreamDispatchReq* pReq, int32_t numOfVgroups);
|
void destroyDispatchMsg(SStreamDispatchReq* pReq, int32_t numOfVgroups);
|
||||||
|
|
|
@ -23,11 +23,6 @@ extern "C" {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// moore finite state machine for stream task
|
// moore finite state machine for stream task
|
||||||
typedef struct SStreamTaskState {
|
|
||||||
ETaskStatus state;
|
|
||||||
char* name;
|
|
||||||
} SStreamTaskState;
|
|
||||||
|
|
||||||
typedef int32_t (*__state_trans_fn)(SStreamTask*);
|
typedef int32_t (*__state_trans_fn)(SStreamTask*);
|
||||||
typedef int32_t (*__state_trans_succ_fn)(SStreamTask*);
|
typedef int32_t (*__state_trans_succ_fn)(SStreamTask*);
|
||||||
|
|
||||||
|
|
|
@ -21,13 +21,18 @@ void* streamTimer = NULL;
|
||||||
int32_t streamTimerInit() {
|
int32_t streamTimerInit() {
|
||||||
streamTimer = taosTmrInit(1000, 100, 10000, "STREAM");
|
streamTimer = taosTmrInit(1000, 100, 10000, "STREAM");
|
||||||
if (streamTimer == NULL) {
|
if (streamTimer == NULL) {
|
||||||
|
stError("init stream timer failed, code:%s", tstrerror(terrno));
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
stInfo("init stream timer, %p", streamTimer);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void streamTimerCleanUp() {
|
void streamTimerCleanUp() {
|
||||||
|
stInfo("cleanup stream timer, %p", streamTimer);
|
||||||
taosTmrCleanUp(streamTimer);
|
taosTmrCleanUp(streamTimer);
|
||||||
|
streamTimer = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId) {
|
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId) {
|
||||||
|
@ -49,7 +54,7 @@ static void streamSchedByTimer(void* param, void* tmrId) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__CK) {
|
if (streamTaskGetStatus(pTask)->state == TASK_STATUS__CK) {
|
||||||
stDebug("s-task:%s in checkpoint procedure, not retrieve result, next:%dms", id, nextTrigger);
|
stDebug("s-task:%s in checkpoint procedure, not retrieve result, next:%dms", id, nextTrigger);
|
||||||
} else {
|
} else {
|
||||||
if (status == TASK_TRIGGER_STATUS__ACTIVE) {
|
if (status == TASK_TRIGGER_STATUS__ACTIVE) {
|
||||||
|
@ -57,7 +62,7 @@ static void streamSchedByTimer(void* param, void* tmrId) {
|
||||||
if (pTrigger == NULL) {
|
if (pTrigger == NULL) {
|
||||||
stError("s-task:%s failed to prepare retrieve data trigger, code:%s, try again in %dms", id, "out of memory",
|
stError("s-task:%s failed to prepare retrieve data trigger, code:%s, try again in %dms", id, "out of memory",
|
||||||
nextTrigger);
|
nextTrigger);
|
||||||
taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pTimer);
|
taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -68,7 +73,7 @@ static void streamSchedByTimer(void* param, void* tmrId) {
|
||||||
|
|
||||||
stError("s-task:%s failed to prepare retrieve data trigger, code:%s, try again in %dms", id, "out of memory",
|
stError("s-task:%s failed to prepare retrieve data trigger, code:%s, try again in %dms", id, "out of memory",
|
||||||
nextTrigger);
|
nextTrigger);
|
||||||
taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pTimer);
|
taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -77,7 +82,7 @@ static void streamSchedByTimer(void* param, void* tmrId) {
|
||||||
|
|
||||||
int32_t code = streamTaskPutDataIntoInputQ(pTask, (SStreamQueueItem*)pTrigger);
|
int32_t code = streamTaskPutDataIntoInputQ(pTask, (SStreamQueueItem*)pTrigger);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pTimer);
|
taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -85,17 +90,17 @@ static void streamSchedByTimer(void* param, void* tmrId) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pTimer);
|
taosTmrReset(streamSchedByTimer, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer);
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t streamSetupScheduleTrigger(SStreamTask* pTask) {
|
int32_t streamSetupScheduleTrigger(SStreamTask* pTask) {
|
||||||
if (pTask->info.triggerParam != 0 && pTask->info.fillHistory == 0) {
|
if (pTask->info.triggerParam != 0 && pTask->info.fillHistory == 0) {
|
||||||
int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
|
int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
|
||||||
ASSERT(ref == 2 && pTask->schedInfo.pTimer == NULL);
|
ASSERT(ref == 2 && pTask->schedInfo.pDelayTimer == NULL);
|
||||||
|
|
||||||
stDebug("s-task:%s setup scheduler trigger, delay:%" PRId64 " ms", pTask->id.idStr, pTask->info.triggerParam);
|
stDebug("s-task:%s setup scheduler trigger, delay:%" PRId64 " ms", pTask->id.idStr, pTask->info.triggerParam);
|
||||||
|
|
||||||
pTask->schedInfo.pTimer = taosTmrStart(streamSchedByTimer, (int32_t)pTask->info.triggerParam, pTask, streamTimer);
|
pTask->schedInfo.pDelayTimer = taosTmrStart(streamSchedByTimer, (int32_t)pTask->info.triggerParam, pTask, streamTimer);
|
||||||
pTask->schedInfo.status = TASK_TRIGGER_STATUS__INACTIVE;
|
pTask->schedInfo.status = TASK_TRIGGER_STATUS__INACTIVE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -210,6 +215,7 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq,
|
||||||
|
|
||||||
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp) {
|
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp) {
|
||||||
int32_t status = 0;
|
int32_t status = 0;
|
||||||
|
SStreamMeta* pMeta = pTask->pMeta;
|
||||||
const char* id = pTask->id.idStr;
|
const char* id = pTask->id.idStr;
|
||||||
|
|
||||||
stDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64 ", msgId:%d", id,
|
stDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64 ", msgId:%d", id,
|
||||||
|
@ -218,7 +224,7 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
|
||||||
SStreamChildEpInfo* pInfo = streamTaskGetUpstreamTaskEpInfo(pTask, pReq->upstreamTaskId);
|
SStreamChildEpInfo* pInfo = streamTaskGetUpstreamTaskEpInfo(pTask, pReq->upstreamTaskId);
|
||||||
ASSERT(pInfo != NULL);
|
ASSERT(pInfo != NULL);
|
||||||
|
|
||||||
if (pTask->pMeta->role == NODE_ROLE_FOLLOWER) {
|
if (pMeta->role == NODE_ROLE_FOLLOWER) {
|
||||||
stError("s-task:%s task on follower received dispatch msgs, dispatch msg rejected", id);
|
stError("s-task:%s task on follower received dispatch msgs, dispatch msg rejected", id);
|
||||||
status = TASK_INPUT_STATUS__REFUSED;
|
status = TASK_INPUT_STATUS__REFUSED;
|
||||||
} else {
|
} else {
|
||||||
|
@ -239,6 +245,22 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
|
||||||
atomic_add_fetch_32(&pTask->upstreamInfo.numOfClosed, 1);
|
atomic_add_fetch_32(&pTask->upstreamInfo.numOfClosed, 1);
|
||||||
streamTaskCloseUpstreamInput(pTask, pReq->upstreamTaskId);
|
streamTaskCloseUpstreamInput(pTask, pReq->upstreamTaskId);
|
||||||
stDebug("s-task:%s close inputQ for upstream:0x%x, msgId:%d", id, pReq->upstreamTaskId, pReq->msgId);
|
stDebug("s-task:%s close inputQ for upstream:0x%x, msgId:%d", id, pReq->upstreamTaskId, pReq->msgId);
|
||||||
|
} else if (pReq->type == STREAM_INPUT__TRANS_STATE) {
|
||||||
|
atomic_add_fetch_32(&pTask->upstreamInfo.numOfClosed, 1);
|
||||||
|
streamTaskCloseUpstreamInput(pTask, pReq->upstreamTaskId);
|
||||||
|
|
||||||
|
// disable the related stream task here to avoid it to receive the newly arrived data after the transfer-state
|
||||||
|
STaskId* pRelTaskId = &pTask->streamTaskId;
|
||||||
|
SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pRelTaskId->streamId, pRelTaskId->taskId);
|
||||||
|
if (pStreamTask != NULL) {
|
||||||
|
atomic_add_fetch_32(&pStreamTask->upstreamInfo.numOfClosed, 1);
|
||||||
|
streamTaskCloseUpstreamInput(pStreamTask, pReq->upstreamRelTaskId);
|
||||||
|
streamMetaReleaseTask(pMeta, pStreamTask);
|
||||||
|
}
|
||||||
|
|
||||||
|
stDebug("s-task:%s close inputQ for upstream:0x%x since trans-state msgId:%d recv, rel stream-task:0x%" PRIx64
|
||||||
|
" close inputQ for upstream:0x%x",
|
||||||
|
id, pReq->upstreamTaskId, pReq->msgId, pTask->streamTaskId.taskId, pReq->upstreamRelTaskId);
|
||||||
}
|
}
|
||||||
|
|
||||||
status = streamTaskAppendInputBlocks(pTask, pReq);
|
status = streamTaskAppendInputBlocks(pTask, pReq);
|
||||||
|
@ -247,9 +269,9 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
|
||||||
}
|
}
|
||||||
|
|
||||||
// disable the data from upstream tasks
|
// disable the data from upstream tasks
|
||||||
if (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__HALT) {
|
// if (streamTaskGetStatus(pTask)->state == TASK_STATUS__HALT) {
|
||||||
status = TASK_INPUT_STATUS__BLOCKED;
|
// status = TASK_INPUT_STATUS__BLOCKED;
|
||||||
}
|
// }
|
||||||
|
|
||||||
{
|
{
|
||||||
// do send response with the input status
|
// do send response with the input status
|
||||||
|
@ -290,6 +312,7 @@ void streamTaskOpenAllUpstreamInput(SStreamTask* pTask) {
|
||||||
}
|
}
|
||||||
|
|
||||||
pTask->upstreamInfo.numOfClosed = 0;
|
pTask->upstreamInfo.numOfClosed = 0;
|
||||||
|
stDebug("s-task:%s opening up inputQ from upstream tasks", pTask->id.idStr);
|
||||||
}
|
}
|
||||||
|
|
||||||
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId) {
|
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId) {
|
||||||
|
|
|
@ -187,7 +187,7 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
|
|
||||||
// set task status
|
// set task status
|
||||||
if (streamTaskGetStatus(pTask, NULL) != TASK_STATUS__CK) {
|
if (streamTaskGetStatus(pTask)->state != TASK_STATUS__CK) {
|
||||||
pTask->chkInfo.checkpointingId = checkpointId;
|
pTask->chkInfo.checkpointingId = checkpointId;
|
||||||
code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT);
|
code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_GEN_CHECKPOINT);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
@ -292,40 +292,45 @@ int32_t streamSaveTaskCheckpointInfo(SStreamTask* p, int64_t checkpointId) {
|
||||||
int32_t vgId = pMeta->vgId;
|
int32_t vgId = pMeta->vgId;
|
||||||
const char* id = p->id.idStr;
|
const char* id = p->id.idStr;
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
|
SCheckpointInfo* pCKInfo = &p->chkInfo;
|
||||||
|
|
||||||
if (p->info.fillHistory == 1) {
|
// fill-history task, rsma task, and sink task will not generate the checkpoint
|
||||||
return code;
|
if ((p->info.fillHistory == 1) || (p->info.taskLevel > TASK_LEVEL__SINK)) {
|
||||||
}
|
|
||||||
|
|
||||||
if (p->info.taskLevel > TASK_LEVEL__SINK) {
|
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
taosThreadMutexLock(&p->lock);
|
taosThreadMutexLock(&p->lock);
|
||||||
|
|
||||||
ASSERT(p->chkInfo.checkpointId <= p->chkInfo.checkpointingId && p->chkInfo.checkpointingId == checkpointId &&
|
SStreamTaskState* pStatus = streamTaskGetStatus(p);
|
||||||
p->chkInfo.checkpointVer <= p->chkInfo.processedVer);
|
if (pStatus->state == TASK_STATUS__CK) {
|
||||||
p->chkInfo.checkpointId = p->chkInfo.checkpointingId;
|
ASSERT(pCKInfo->checkpointId <= pCKInfo->checkpointingId && pCKInfo->checkpointingId == checkpointId &&
|
||||||
p->chkInfo.checkpointVer = p->chkInfo.processedVer;
|
pCKInfo->checkpointVer <= pCKInfo->processedVer);
|
||||||
|
|
||||||
|
pCKInfo->checkpointId = pCKInfo->checkpointingId;
|
||||||
|
pCKInfo->checkpointVer = pCKInfo->processedVer;
|
||||||
|
|
||||||
streamTaskClearCheckInfo(p, false);
|
streamTaskClearCheckInfo(p, false);
|
||||||
char* str = NULL;
|
|
||||||
streamTaskGetStatus(p, &str);
|
|
||||||
|
|
||||||
code = streamTaskHandleEvent(p->status.pSM, TASK_EVENT_CHECKPOINT_DONE);
|
code = streamTaskHandleEvent(p->status.pSM, TASK_EVENT_CHECKPOINT_DONE);
|
||||||
taosThreadMutexUnlock(&p->lock);
|
taosThreadMutexUnlock(&p->lock);
|
||||||
|
} else {
|
||||||
|
stDebug("s-task:%s vgId:%d status:%s not keep the checkpoint metaInfo, checkpoint:%" PRId64 " failed", id, vgId,
|
||||||
|
pStatus->name, pCKInfo->checkpointingId);
|
||||||
|
taosThreadMutexUnlock(&p->lock);
|
||||||
|
|
||||||
|
return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
|
||||||
|
}
|
||||||
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
stDebug("s-task:%s vgId:%d handle event:checkpoint-done failed", id, vgId);
|
stDebug("s-task:%s vgId:%d handle event:checkpoint-done failed", id, vgId);
|
||||||
return -1;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
stDebug("vgId:%d s-task:%s level:%d open upstream inputQ, save status after checkpoint, checkpointId:%" PRId64
|
stDebug("vgId:%d s-task:%s level:%d open upstream inputQ, save status after checkpoint, checkpointId:%" PRId64
|
||||||
", Ver(saved):%" PRId64 " currentVer:%" PRId64 ", status: normal, prev:%s",
|
", Ver(saved):%" PRId64 " currentVer:%" PRId64 ", status: normal, prev:%s",
|
||||||
vgId, id, p->info.taskLevel, checkpointId, p->chkInfo.checkpointVer, p->chkInfo.nextProcessVer, str);
|
vgId, id, p->info.taskLevel, checkpointId, pCKInfo->checkpointVer, pCKInfo->nextProcessVer, pStatus->name);
|
||||||
|
|
||||||
// save the task if not sink task
|
// save the task if not sink task
|
||||||
if (p->info.taskLevel != TASK_LEVEL__SINK) {
|
if (p->info.taskLevel < TASK_LEVEL__SINK) {
|
||||||
streamMetaWLock(pMeta);
|
streamMetaWLock(pMeta);
|
||||||
|
|
||||||
code = streamMetaSaveTask(pMeta, p);
|
code = streamMetaSaveTask(pMeta, p);
|
||||||
|
@ -344,6 +349,7 @@ int32_t streamSaveTaskCheckpointInfo(SStreamTask* p, int64_t checkpointId) {
|
||||||
|
|
||||||
streamMetaWUnLock(pMeta);
|
streamMetaWUnLock(pMeta);
|
||||||
}
|
}
|
||||||
|
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -445,13 +451,14 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
|
||||||
int32_t code = TSDB_CODE_SUCCESS;
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
int64_t startTs = pTask->chkInfo.startTs;
|
int64_t startTs = pTask->chkInfo.startTs;
|
||||||
int64_t ckId = pTask->chkInfo.checkpointingId;
|
int64_t ckId = pTask->chkInfo.checkpointingId;
|
||||||
|
const char* id = pTask->id.idStr;
|
||||||
|
|
||||||
// sink task do not need to save the status, and generated the checkpoint
|
// sink task do not need to save the status, and generated the checkpoint
|
||||||
if (pTask->info.taskLevel != TASK_LEVEL__SINK) {
|
if (pTask->info.taskLevel != TASK_LEVEL__SINK) {
|
||||||
stDebug("s-task:%s level:%d start gen checkpoint", pTask->id.idStr, pTask->info.taskLevel);
|
stDebug("s-task:%s level:%d start gen checkpoint", id, pTask->info.taskLevel);
|
||||||
code = streamBackendDoCheckpoint(pTask->pBackend, ckId);
|
code = streamBackendDoCheckpoint(pTask->pBackend, ckId);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
stError("s-task:%s gen checkpoint:%" PRId64 " failed, code:%s", pTask->id.idStr, ckId, tstrerror(terrno));
|
stError("s-task:%s gen checkpoint:%" PRId64 " failed, code:%s", id, ckId, tstrerror(terrno));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -465,39 +472,38 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
|
||||||
|
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
// todo: let's retry send rsp to upstream/mnode
|
// todo: let's retry send rsp to upstream/mnode
|
||||||
stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%" PRId64 ", code:%s", pTask->id.idStr,
|
stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%" PRId64 ", code:%s", id, ckId,
|
||||||
ckId, tstrerror(code));
|
tstrerror(code));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// clear the checkpoint info, and commit the newest checkpoint info if all works are done successfully
|
// clear the checkpoint info, and commit the newest checkpoint info if all works are done successfully
|
||||||
if (code == TSDB_CODE_SUCCESS) {
|
if (code == TSDB_CODE_SUCCESS) {
|
||||||
code = streamSaveTaskCheckpointInfo(pTask, ckId);
|
code = streamSaveTaskCheckpointInfo(pTask, ckId);
|
||||||
|
if (code == TSDB_CODE_SUCCESS) {
|
||||||
|
code = streamTaskUploadChkp(pTask, ckId, (char*)id);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
stError("s-task:%s commit taskInfo failed, checkpoint:%" PRId64 " failed, code:%s", pTask->id.idStr, ckId,
|
stError("s-task:%s failed to upload checkpoint:%" PRId64 " failed", id, ckId);
|
||||||
tstrerror(terrno));
|
|
||||||
} else {
|
|
||||||
code = streamTaskUploadChkp(pTask, ckId, (char*)pTask->id.idStr);
|
|
||||||
if (code != 0) {
|
|
||||||
stError("s-task:%s failed to upload checkpoint:%" PRId64 " failed", pTask->id.idStr, ckId);
|
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
stError("s-task:%s commit taskInfo failed, checkpoint:%" PRId64 " failed, code:%s", id, ckId, tstrerror(code));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (code != TSDB_CODE_SUCCESS) { // clear the checkpoint info if failed
|
// clear the checkpoint info if failed
|
||||||
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
taosThreadMutexLock(&pTask->lock);
|
taosThreadMutexLock(&pTask->lock);
|
||||||
streamTaskClearCheckInfo(pTask, false);
|
streamTaskClearCheckInfo(pTask, false);
|
||||||
code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_CHECKPOINT_DONE);
|
code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_CHECKPOINT_DONE);
|
||||||
taosThreadMutexUnlock(&pTask->lock);
|
taosThreadMutexUnlock(&pTask->lock);
|
||||||
|
|
||||||
streamTaskSetCheckpointFailedId(pTask);
|
streamTaskSetCheckpointFailedId(pTask);
|
||||||
stDebug("s-task:%s clear checkpoint flag since gen checkpoint failed, checkpointId:%" PRId64, pTask->id.idStr,
|
stDebug("s-task:%s clear checkpoint flag since gen checkpoint failed, checkpointId:%" PRId64, id, ckId);
|
||||||
ckId);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
double el = (taosGetTimestampMs() - startTs) / 1000.0;
|
double el = (taosGetTimestampMs() - startTs) / 1000.0;
|
||||||
stInfo("s-task:%s vgId:%d level:%d, checkpointId:%" PRId64 " ver:%" PRId64 " elapsed time:%.2f Sec, %s ",
|
stInfo("s-task:%s vgId:%d level:%d, checkpointId:%" PRId64 " ver:%" PRId64 " elapsed time:%.2f Sec, %s ", id,
|
||||||
pTask->id.idStr, pTask->pMeta->vgId, pTask->info.taskLevel, ckId, pTask->chkInfo.checkpointVer, el,
|
pTask->pMeta->vgId, pTask->info.taskLevel, ckId, pTask->chkInfo.checkpointVer, el,
|
||||||
(code == TSDB_CODE_SUCCESS) ? "succ" : "failed");
|
(code == TSDB_CODE_SUCCESS) ? "succ" : "failed");
|
||||||
|
|
||||||
return code;
|
return code;
|
||||||
|
|
|
@@ -58,6 +58,7 @@ int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* p
   if (tEncodeI32(pEncoder, pReq->upstreamTaskId) < 0) return -1;
   if (tEncodeI32(pEncoder, pReq->upstreamChildId) < 0) return -1;
   if (tEncodeI32(pEncoder, pReq->upstreamNodeId) < 0) return -1;
+  if (tEncodeI32(pEncoder, pReq->upstreamRelTaskId) < 0) return -1;
   if (tEncodeI32(pEncoder, pReq->blockNum) < 0) return -1;
   if (tEncodeI64(pEncoder, pReq->totalLen) < 0) return -1;
   ASSERT(taosArrayGetSize(pReq->data) == pReq->blockNum);

@@ -84,6 +85,7 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) {
   if (tDecodeI32(pDecoder, &pReq->upstreamTaskId) < 0) return -1;
   if (tDecodeI32(pDecoder, &pReq->upstreamChildId) < 0) return -1;
   if (tDecodeI32(pDecoder, &pReq->upstreamNodeId) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pReq->upstreamRelTaskId) < 0) return -1;
   if (tDecodeI32(pDecoder, &pReq->blockNum) < 0) return -1;
   if (tDecodeI64(pDecoder, &pReq->totalLen) < 0) return -1;

@@ -114,6 +116,7 @@ static int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTas
   pReq->upstreamTaskId = pTask->id.taskId;
   pReq->upstreamChildId = pTask->info.selfChildId;
   pReq->upstreamNodeId = pTask->info.nodeId;
+  pReq->upstreamRelTaskId = pTask->streamTaskId.taskId;
   pReq->blockNum = numOfBlocks;
   pReq->taskId = dstTaskId;
   pReq->type = type;
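Note: the three hunks above add `upstreamRelTaskId` to `SStreamDispatchReq` in the encoder, decoder, and initializer alike. The field is serialized mid-struct, so both peers must pick up this change together; a minimal sketch of the invariant (helper names as they appear in the diff, the comment is mine):

```c
// Encode and decode must stay in lockstep: upstreamRelTaskId sits between
// upstreamNodeId and blockNum on both sides. Adding it on only one side
// would shift every later field and corrupt the decoded request.
if (tEncodeI32(pEncoder, pReq->upstreamRelTaskId) < 0) return -1;   // sender
/* ... */
if (tDecodeI32(pDecoder, &pReq->upstreamRelTaskId) < 0) return -1;  // receiver
```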
@@ -696,10 +699,9 @@ int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask) {
     int32_t numOfVgs = taosArrayGetSize(vgInfo);
     pTask->notReadyTasks = numOfVgs;

-    char* p = NULL;
-    streamTaskGetStatus(pTask, &p);
+    SStreamTaskState* pState = streamTaskGetStatus(pTask);
     stDebug("s-task:%s send scan-history data complete msg to downstream (shuffle-dispatch) %d tasks, status:%s",
-            pTask->id.idStr, numOfVgs, p);
+            pTask->id.idStr, numOfVgs, pState->name);
     for (int32_t i = 0; i < numOfVgs; i++) {
       SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
       req.downstreamTaskId = pVgInfo->taskId;

@@ -819,9 +821,9 @@ int32_t doDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamScanHist
   initRpcMsg(&msg, TDMT_VND_STREAM_SCAN_HISTORY_FINISH, buf, tlen + sizeof(SMsgHead));

   tmsgSendReq(pEpSet, &msg);
-  char* p = NULL;
-  streamTaskGetStatus(pTask, &p);
-  stDebug("s-task:%s status:%s dispatch scan-history finish msg to taskId:0x%x (vgId:%d)", pTask->id.idStr, p,
+  SStreamTaskState* pState = streamTaskGetStatus(pTask);
+  stDebug("s-task:%s status:%s dispatch scan-history finish msg to taskId:0x%x (vgId:%d)", pTask->id.idStr, pState->name,
           pReq->downstreamTaskId, vgId);
   return 0;
 }
@@ -24,12 +24,12 @@
 static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask);

 bool streamTaskShouldStop(const SStreamTask* pTask) {
-  ETaskStatus s = streamTaskGetStatus(pTask, NULL);
-  return (s == TASK_STATUS__STOP) || (s == TASK_STATUS__DROPPING);
+  SStreamTaskState* pState = streamTaskGetStatus(pTask);
+  return (pState->state == TASK_STATUS__STOP) || (pState->state == TASK_STATUS__DROPPING);
 }

 bool streamTaskShouldPause(const SStreamTask* pTask) {
-  return (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__PAUSE);
+  return (streamTaskGetStatus(pTask)->state == TASK_STATUS__PAUSE);
 }

 static int32_t doOutputResultBlockImpl(SStreamTask* pTask, SStreamDataBlock* pBlock) {

@@ -107,12 +107,6 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
       return 0;
     }

-    if (pTask->inputq.status == TASK_INPUT_STATUS__BLOCKED) {
-      stWarn("s-task:%s downstream task inputQ blocked, idle for 1sec and retry exec task", pTask->id.idStr);
-      taosMsleep(1000);
-      continue;
-    }
-
     SSDataBlock* output = NULL;
     uint64_t ts = 0;
     if ((code = qExecTask(pExecutor, &output, &ts)) < 0) {
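Note: a pattern that repeats throughout this commit is visible above: `streamTaskGetStatus` no longer fills a `char**` out-parameter but returns a `SStreamTaskState*` carrying both the enum state and its printable name. A minimal sketch of the two call shapes (the struct members are inferred from their usage in the diff):

```c
// Old shape (removed): enum returned, name via out-parameter.
//   char* p = NULL;
//   ETaskStatus s = streamTaskGetStatus(pTask, &p);
//
// New shape (this commit): one call yields both values.
SStreamTaskState* pState = streamTaskGetStatus(pTask);
ETaskStatus       s      = pState->state;   // enum, for branching
const char*       name   = pState->name;    // printable, for log messages
```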
@@ -344,11 +338,14 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
     streamMetaWUnLock(pMeta);
     return TSDB_CODE_STREAM_TASK_NOT_EXIST;
   } else {
-    stDebug("s-task:%s fill-history task end, update related stream task:%s info, transfer exec state", id,
-            pStreamTask->id.idStr);
+    double el = (taosGetTimestampMs() - pTask->execInfo.step2Start) / 1000.;
+    stDebug(
+        "s-task:%s fill-history task end, scal wal elapsed time:%.2fSec,update related stream task:%s info, transfer "
+        "exec state",
+        id, el, pStreamTask->id.idStr);
   }

-  ETaskStatus status = streamTaskGetStatus(pStreamTask, NULL);
+  ETaskStatus status = streamTaskGetStatus(pStreamTask)->state;
   STimeWindow* pTimeWindow = &pStreamTask->dataRange.window;

   // It must be halted for a source stream task, since when the related scan-history-data task start scan the history

@@ -374,8 +371,9 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
   // In case of sink tasks, no need to halt them.
   // In case of source tasks and agg tasks, we should HALT them, and wait for them to be idle. And then, it's safe to
   // start the task state transfer procedure.
-  char* p = NULL;
-  status = streamTaskGetStatus(pStreamTask, &p);
+  SStreamTaskState* pState = streamTaskGetStatus(pStreamTask);
+  status = pState->state;
+  char* p = pState->name;
   if (status == TASK_STATUS__STOP || status == TASK_STATUS__DROPPING) {
     stError("s-task:%s failed to transfer state from fill-history task:%s, status:%s", id, pStreamTask->id.idStr, p);
     streamMetaReleaseTask(pMeta, pStreamTask);

@@ -393,8 +391,12 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
   }

   // 1. expand the query time window for stream task of WAL scanner
+  if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
     pTimeWindow->skey = INT64_MIN;
     qStreamInfoResetTimewindowFilter(pStreamTask->exec.pExecutor);
+  } else {
+    stDebug("s-task:%s non-source task no need to reset filter window", pStreamTask->id.idStr);
+  }

   // 2. transfer the ownership of executor state
   streamTaskReleaseState(pTask);
@@ -408,10 +410,13 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
   // 4. free it and remove fill-history task from disk meta-store
   streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);

-  // 5. save to disk
-  pStreamTask->status.taskStatus = streamTaskGetStatus(pStreamTask, NULL);
+  // 5. assign the status to the value that will be kept in disk
+  pStreamTask->status.taskStatus = streamTaskGetStatus(pStreamTask)->state;

-  // 6. add empty delete block
+  // 6. open the inputQ for all upstream tasks
+  streamTaskOpenAllUpstreamInput(pStreamTask);

+  // 7. add empty delete block
   if ((pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) && taosQueueEmpty(pStreamTask->inputq.queue->pQueue)) {
     SStreamRefDataBlock* pItem = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0);

@@ -431,6 +436,8 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {

 int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
   int32_t code = TSDB_CODE_SUCCESS;
+  SStreamMeta* pMeta = pTask->pMeta;

   ASSERT(pTask->status.appendTranstateBlock == 1);

   int32_t level = pTask->info.taskLevel;

@@ -440,8 +447,14 @@ int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {

   if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) {  // do transfer task operator states.
     code = streamDoTransferStateToStreamTask(pTask);
-  } else {  // drop fill-history task
-    streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pTask->pMeta->vgId, &pTask->id);
+  } else {  // drop fill-history task and open inputQ of sink task
+    SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
+    if (pStreamTask != NULL) {
+      streamTaskOpenAllUpstreamInput(pStreamTask);
+      streamMetaReleaseTask(pMeta, pStreamTask);
+    }
+
+    streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);
   }

   return code;
@@ -497,16 +510,17 @@ static void doSetStreamInputBlock(SStreamTask* pTask, const void* pInput, int64_
   }
 }

-int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) {
+int32_t streamProcessTransstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) {
   const char* id = pTask->id.idStr;
   int32_t code = TSDB_CODE_SUCCESS;

   int32_t level = pTask->info.taskLevel;

   if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SINK) {
     int32_t remain = streamAlignTransferState(pTask);

     if (remain > 0) {
       streamFreeQitem((SStreamQueueItem*)pBlock);
-      stDebug("s-task:%s receive upstream transfer state msg, remain:%d", id, remain);
+      stDebug("s-task:%s receive upstream trans-state msg, not sent remain:%d", id, remain);
       return 0;
     }
   }

@@ -537,7 +551,7 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
     }
   } else {  // non-dispatch task, do task state transfer directly
     streamFreeQitem((SStreamQueueItem*)pBlock);
-    stDebug("s-task:%s non-dispatch task, level:%d start to transfer state directly", id, pTask->info.taskLevel);
+    stDebug("s-task:%s non-dispatch task, level:%d start to transfer state directly", id, level);
     ASSERT(pTask->info.fillHistory == 1);

     code = streamTransferStateToStreamTask(pTask);

@@ -549,6 +563,10 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
   return code;
 }

+static void setTaskSchedInfo(SStreamTask* pTask, int32_t idleTime) { pTask->status.schedIdleTime = idleTime; }
+static void clearTaskSchedInfo(SStreamTask* pTask) { pTask->status.schedIdleTime = 0; }
+static void setLastExecTs(SStreamTask* pTask, int64_t ts) { pTask->status.lastExecTs = ts; }
+
 /**
  * todo: the batch of blocks should be tuned dynamic, according to the total elapsed time of each batch of blocks, the
  * appropriate batch of blocks should be handled in 5 to 10 sec.
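Note: the three one-line helpers added above carry the new cooperative-idling state used below by `doStreamExecTask` and `streamResumeTask`: instead of sleeping inline, an exec pass records how long the task should stay idle and returns. A minimal sketch of the interaction, with field names taken from the diff:

```c
// Sketch only: an exec pass asks for a pause instead of blocking the worker thread.
if (pTask->inputq.status == TASK_INPUT_STATUS__BLOCKED) {
  setTaskSchedInfo(pTask, 1000);  // "retry me in ~1s"
  return 0;                       // give the worker thread back immediately
}
// Later, the scheduler checks pTask->status.schedIdleTime and, if non-zero,
// arms a timer that re-enqueues the task instead of spinning (streamResumeTask below).
```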
@@ -563,9 +581,28 @@ int32_t doStreamExecTask(SStreamTask* pTask) {
     int32_t blockSize = 0;
     int32_t numOfBlocks = 0;
     SStreamQueueItem* pInput = NULL;
-    if (streamTaskShouldStop(pTask) || (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__UNINIT)) {
+    if (streamTaskShouldStop(pTask) || (streamTaskGetStatus(pTask)->state == TASK_STATUS__UNINIT)) {
       stDebug("s-task:%s stream task is stopped", id);
-      break;
+      return 0;
+    }
+
+    if (streamQueueIsFull(pTask->outputq.queue)) {
+      stWarn("s-task:%s outputQ is full, idle for 500ms and retry", id);
+      setTaskSchedInfo(pTask, 500);
+      return 0;
+    }
+
+    if (pTask->inputq.status == TASK_INPUT_STATUS__BLOCKED) {
+      stWarn("s-task:%s downstream task inputQ blocked, idle for 1sec and retry", id);
+      setTaskSchedInfo(pTask, 1000);
+      return 0;
+    }
+
+    if (taosGetTimestampMs() - pTask->status.lastExecTs < 50) {
+      stDebug("s-task:%s invoke with high frequency, idle and retry exec in 50ms", id);
+      setTaskSchedInfo(pTask, 50);
+      return 0;
     }

     /*int32_t code = */ streamTaskGetDataFromInputQ(pTask, &pInput, &numOfBlocks, &blockSize);

@@ -574,16 +611,15 @@ int32_t doStreamExecTask(SStreamTask* pTask) {
       return 0;
     }

-    int32_t type = pInput->type;

     // dispatch checkpoint msg to all downstream tasks
+    int32_t type = pInput->type;
     if (type == STREAM_INPUT__CHECKPOINT_TRIGGER) {
       streamProcessCheckpointBlock(pTask, (SStreamDataBlock*)pInput);
       continue;
     }

-    if (pInput->type == STREAM_INPUT__TRANS_STATE) {
-      streamProcessTranstateBlock(pTask, (SStreamDataBlock*)pInput);
+    if (type == STREAM_INPUT__TRANS_STATE) {
+      streamProcessTransstateBlock(pTask, (SStreamDataBlock*)pInput);
       continue;
     }

@@ -623,7 +659,7 @@ int32_t doStreamExecTask(SStreamTask* pTask) {
       if (ver != pInfo->processedVer) {
         stDebug("s-task:%s update processedVer(unsaved) from %" PRId64 " to %" PRId64 " nextProcessVer:%" PRId64
                 " ckpt:%" PRId64,
-                pTask->id.idStr, pInfo->processedVer, ver, pInfo->nextProcessVer, pInfo->checkpointVer);
+                id, pInfo->processedVer, ver, pInfo->nextProcessVer, pInfo->checkpointVer);
         pInfo->processedVer = ver;
       }

@@ -634,10 +670,9 @@ int32_t doStreamExecTask(SStreamTask* pTask) {
     if (type == STREAM_INPUT__CHECKPOINT) {

       // todo add lock
-      char* p = NULL;
-      ETaskStatus s = streamTaskGetStatus(pTask, &p);
-      if (s == TASK_STATUS__CK) {
-        stDebug("s-task:%s checkpoint block received, set status:%s", pTask->id.idStr, p);
+      SStreamTaskState* pState = streamTaskGetStatus(pTask);
+      if (pState->state == TASK_STATUS__CK) {
+        stDebug("s-task:%s checkpoint block received, set status:%s", id, pState->name);
         streamTaskBuildCheckpoint(pTask);
       } else {
         // todo refactor

@@ -650,8 +685,8 @@ int32_t doStreamExecTask(SStreamTask* pTask) {

       if (code != TSDB_CODE_SUCCESS) {
         // todo: let's retry send rsp to upstream/mnode
-        stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%d, code:%s", pTask->id.idStr,
-                0, tstrerror(code));
+        stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%d, code:%s", id, 0,
+                tstrerror(code));
       }
     }
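Note: with the gates added above, `doStreamExecTask` now refuses to run rather than wait: stopped/uninitialized returns immediately, while a full outputQ, a blocked downstream inputQ, or a call within 50 ms of the last pass each record an idle interval via `setTaskSchedInfo` and return. A hypothetical helper condensing the three reschedule gates (the actual commit inlines them; intervals are the values from the diff):

```c
// Sketch of the gating policy; returns true and an idle interval (ms) when the
// task should be parked instead of executed now.
static bool shouldIdle(SStreamTask* pTask, int32_t* pIdleMs) {
  if (streamQueueIsFull(pTask->outputq.queue))              { *pIdleMs = 500;  return true; }
  if (pTask->inputq.status == TASK_INPUT_STATUS__BLOCKED)   { *pIdleMs = 1000; return true; }
  if (taosGetTimestampMs() - pTask->status.lastExecTs < 50) { *pIdleMs = 50;   return true; }
  return false;  // safe to run an exec pass now
}
```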
@@ -665,15 +700,117 @@ int32_t doStreamExecTask(SStreamTask* pTask) {
 // the task may be set dropping/stopping, while it is still in the task queue, therefore, the sched-status can not
 // be updated by tryExec function, therefore, the schedStatus will always be the TASK_SCHED_STATUS__WAITING.
 bool streamTaskIsIdle(const SStreamTask* pTask) {
-  ETaskStatus status = streamTaskGetStatus(pTask, NULL);
+  ETaskStatus status = streamTaskGetStatus(pTask)->state;
   return (pTask->status.schedStatus == TASK_SCHED_STATUS__INACTIVE || status == TASK_STATUS__STOP ||
           status == TASK_STATUS__DROPPING);
 }

 bool streamTaskReadyToRun(const SStreamTask* pTask, char** pStatus) {
-  ETaskStatus st = streamTaskGetStatus(pTask, NULL);
-  return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY/* || st == TASK_STATUS__STREAM_SCAN_HISTORY*/ ||
-          st == TASK_STATUS__CK);
+  SStreamTaskState* pState = streamTaskGetStatus(pTask);
+
+  ETaskStatus st = pState->state;
+  if (pStatus != NULL) {
+    *pStatus = pState->name;
+  }
+
+  return (st == TASK_STATUS__READY || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__CK);
+}
+
+static void doStreamExecTaskHelper(void* param, void* tmrId) {
+  SStreamTask* pTask = (SStreamTask*)param;
+
+  SStreamTaskState* p = streamTaskGetStatus(pTask);
+  if (p->state == TASK_STATUS__DROPPING || p->state == TASK_STATUS__STOP) {
+    streamTaskSetSchedStatusInactive(pTask);
+
+    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+    stDebug("s-task:%s status:%s not resume task, ref:%d", pTask->id.idStr, p->name, ref);
+
+    streamMetaReleaseTask(pTask->pMeta, pTask);
+    return;
+  }
+
+  // task resume running
+  SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
+  if (pRunReq == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    /*int8_t status = */streamTaskSetSchedStatusInactive(pTask);
+
+    int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+    stError("failed to create msg to resume s-task:%s, reason out of memory, ref:%d", pTask->id.idStr, ref);
+
+    streamMetaReleaseTask(pTask->pMeta, pTask);
+    return;
+  }
+
+  pRunReq->head.vgId = pTask->info.nodeId;
+  pRunReq->streamId = pTask->id.streamId;
+  pRunReq->taskId = pTask->id.taskId;
+  pRunReq->reqType = STREAM_EXEC_T_RESUME_TASK;
+
+  int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+  stDebug("trigger to resume s-task:%s after being idled for %dms, ref:%d", pTask->id.idStr, pTask->status.schedIdleTime, ref);
+
+  SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
+  tmsgPutToQueue(pTask->pMsgCb, STREAM_QUEUE, &msg);
+
+  // release the task ref count
+  clearTaskSchedInfo(pTask);
+  streamMetaReleaseTask(pTask->pMeta, pTask);
+}
+
+static int32_t schedTaskInFuture(SStreamTask* pTask) {
+  int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
+  stDebug("s-task:%s task should idle, add into timer to retry in %dms, ref:%d", pTask->id.idStr,
+          pTask->status.schedIdleTime, ref);
+
+  // add one ref count for task
+  // todo this may be failed, and add ref may be failed.
+  SStreamTask* pAddRefTask = streamMetaAcquireTask(pTask->pMeta, pTask->id.streamId, pTask->id.taskId);
+
+  if (pTask->schedInfo.pIdleTimer == NULL) {
+    pTask->schedInfo.pIdleTimer = taosTmrStart(doStreamExecTaskHelper, pTask->status.schedIdleTime, pTask, streamTimer);
+  } else {
+    taosTmrReset(doStreamExecTaskHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer);
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+int32_t streamResumeTask(SStreamTask* pTask) {
+  ASSERT(pTask->status.schedStatus == TASK_SCHED_STATUS__ACTIVE);
+  const char* id = pTask->id.idStr;
+
+  while (1) {
+    /*int32_t code = */doStreamExecTask(pTask);
+    taosThreadMutexLock(&pTask->lock);
+
+    // check if this task needs to be idle for a while
+    if (pTask->status.schedIdleTime > 0) {
+      schedTaskInFuture(pTask);
+
+      taosThreadMutexUnlock(&pTask->lock);
+      setLastExecTs(pTask, taosGetTimestampMs());
+      return 0;
+    } else {
+      int32_t numOfItems = streamQueueGetNumOfItems(pTask->inputq.queue);
+
+      if ((numOfItems == 0) || streamTaskShouldStop(pTask) || streamTaskShouldPause(pTask)) {
+        atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
+        taosThreadMutexUnlock(&pTask->lock);
+
+        setLastExecTs(pTask, taosGetTimestampMs());
+
+        char* p = streamTaskGetStatus(pTask)->name;
+        stDebug("s-task:%s exec completed, status:%s, sched-status:%d, lastExecTs:%" PRId64, id, p,
+                pTask->status.schedStatus, pTask->status.lastExecTs);
+
+        return 0;
+      }
+    }
+
+    taosThreadMutexUnlock(&pTask->lock);
+  }
 }

 int32_t streamExecTask(SStreamTask* pTask) {
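Note: taken together, `streamResumeTask`, `schedTaskInFuture`, and `doStreamExecTaskHelper` form the new run/park/resume cycle. A condensed sketch of it (locking and status bookkeeping omitted; names from the diff, comments mine):

```c
// streamResumeTask() drives exec passes until the task drains or asks to idle.
while (1) {
  doStreamExecTask(pTask);                 // may set status.schedIdleTime via the new gates
  if (pTask->status.schedIdleTime > 0) {   // park: arm pIdleTimer via schedTaskInFuture()
    schedTaskInFuture(pTask);              // timer fires -> doStreamExecTaskHelper()
    break;                                 //   -> posts a STREAM_EXEC_T_RESUME_TASK run-request
  }                                        //   -> stream queue worker re-enters streamExecTask()
  if (streamQueueGetNumOfItems(pTask->inputq.queue) == 0) break;  // drained: go INACTIVE
}
```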
@@ -682,29 +819,9 @@ int32_t streamExecTask(SStreamTask* pTask) {

   int8_t schedStatus = streamTaskSetSchedStatusActive(pTask);
   if (schedStatus == TASK_SCHED_STATUS__WAITING) {
-    while (1) {
-      int32_t code = doStreamExecTask(pTask);
-      if (code < 0) {  // todo this status should be removed
-        atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__FAILED);
-        return -1;
-      }
-
-      taosThreadMutexLock(&pTask->lock);
-      if ((streamQueueGetNumOfItems(pTask->inputq.queue) == 0) || streamTaskShouldStop(pTask) ||
-          streamTaskShouldPause(pTask)) {
-        atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
-        taosThreadMutexUnlock(&pTask->lock);
-
-        char* p = NULL;
-        streamTaskGetStatus(pTask, &p);
-        stDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, p, pTask->status.schedStatus);
-        return 0;
-      }
-      taosThreadMutexUnlock(&pTask->lock);
-    }
+    streamResumeTask(pTask);
   } else {
-    char* p = NULL;
-    streamTaskGetStatus(pTask, &p);
+    char* p = streamTaskGetStatus(pTask)->name;
     stDebug("s-task:%s already started to exec by other thread, status:%s, sched-status:%d", id, p,
             pTask->status.schedStatus);
   }
@@ -61,6 +61,7 @@ static void streamMetaEnvInit() {
   streamMetaId = taosOpenRef(64, streamMetaCloseImpl);

   metaRefMgtInit();
+  streamTimerInit();
 }

 void streamMetaInit() { taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit);}

@@ -71,6 +72,7 @@ void streamMetaCleanup() {
   taosCloseRef(streamMetaId);

   metaRefMgtCleanup();
+  streamTimerCleanUp();
 }

 void metaRefMgtInit() {

@@ -359,7 +361,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
     goto _err;
   }

-  pMeta->walScanCounter = 0;
+  pMeta->scanInfo.scanCounter = 0;
   pMeta->vgId = vgId;
   pMeta->ahandle = ahandle;
   pMeta->expandFunc = expandFunc;

@@ -377,16 +379,16 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
   pMeta->rid = taosAddRef(streamMetaId, pMeta);

   // set the attribute when running on Linux OS
-#if defined LINUX
   TdThreadRwlockAttr attr;
   taosThreadRwlockAttrInit(&attr);

+#ifdef LINUX
   pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
-  taosThreadRwlockInit(&pMeta->lock, &attr);

-  taosThreadRwlockAttrDestroy(&attr);
 #endif

+  taosThreadRwlockInit(&pMeta->lock, &attr);
+  taosThreadRwlockAttrDestroy(&attr);

   int64_t* pRid = taosMemoryMalloc(sizeof(int64_t));
   memcpy(pRid, &pMeta->rid, sizeof(pMeta->rid));
   metaRefMgtAdd(pMeta->vgId, pRid);
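Note: the rwlock hunk above is a portability fix. Previously the guarded block covered the `taosThreadRwlockInit` call itself, so on non-Linux builds the meta lock was never initialized through this path; now only the glibc-specific writer-preference knob stays inside the guard. The resulting shape, shown standalone (a sketch; the `TdThreadRwlockAttr` type and `taosThreadRwlock*` wrappers are the ones used in the diff):

```c
// Attribute setup and lock init now run on every platform.
TdThreadRwlockAttr attr;
taosThreadRwlockAttrInit(&attr);
#ifdef LINUX
// Linux/glibc-only tweak to avoid writer starvation.
pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
#endif
taosThreadRwlockInit(&pMeta->lock, &attr);
taosThreadRwlockAttrDestroy(&attr);
```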
@@ -452,7 +454,7 @@ void streamMetaClear(SStreamMeta* pMeta) {
     // release the ref by timer
     if (p->info.triggerParam != 0 && p->info.fillHistory == 0) {  // one more ref in timer
       stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", p->id.idStr, p->refCnt);
-      taosTmrStop(p->schedInfo.pTimer);
+      taosTmrStop(p->schedInfo.pDelayTimer);
       p->info.triggerParam = 0;
       streamMetaReleaseTask(pMeta, p);
     }

@@ -467,6 +469,7 @@ void streamMetaClear(SStreamMeta* pMeta) {
   taosArrayClear(pMeta->pTaskList);
   taosArrayClear(pMeta->chkpSaved);
   taosArrayClear(pMeta->chkpInUse);

   pMeta->numOfStreamTasks = 0;
   pMeta->numOfPausedTasks = 0;

@@ -730,7 +733,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t

   if (pTask->info.triggerParam != 0 && pTask->info.fillHistory == 0) {
     stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt);
-    taosTmrStop(pTask->schedInfo.pTimer);
+    taosTmrStop(pTask->schedInfo.pDelayTimer);
     pTask->info.triggerParam = 0;
     streamMetaReleaseTask(pMeta, pTask);
   }
@@ -1093,13 +1096,13 @@ static int32_t metaHeartbeatToMnodeImpl(SStreamMeta* pMeta) {

     STaskStatusEntry entry = {
         .id = *pId,
-        .status = streamTaskGetStatus(*pTask, NULL),
+        .status = streamTaskGetStatus(*pTask)->state,
         .nodeId = hbMsg.vgId,
         .stage = pMeta->stage,
         .inputQUsed = SIZE_IN_MiB(streamQueueGetItemSize((*pTask)->inputq.queue)),
     };

-    entry.inputRate = entry.inputQUsed * 100.0 / STREAM_TASK_QUEUE_CAPACITY_IN_SIZE;
+    entry.inputRate = entry.inputQUsed * 100.0 / (2*STREAM_TASK_QUEUE_CAPACITY_IN_SIZE);
     if ((*pTask)->info.taskLevel == TASK_LEVEL__SINK) {
       entry.sinkQuota = (*pTask)->outputInfo.pTokenBucket->quotaRate;
       entry.sinkDataSize = SIZE_IN_MiB((*pTask)->execInfo.sink.dataSize);

@@ -1262,11 +1265,11 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) {

   // wait for the stream meta hb function stopping
   if (pMeta->role == NODE_ROLE_LEADER) {
-    pMeta->pHbInfo->stopFlag = STREAM_META_WILL_STOP;
-    while (pMeta->pHbInfo->stopFlag != STREAM_META_OK_TO_STOP) {
-      taosMsleep(100);
-      stDebug("vgId:%d wait for meta to stop timer", pMeta->vgId);
-    }
+    // pMeta->pHbInfo->stopFlag = STREAM_META_WILL_STOP;
+    // while (pMeta->pHbInfo->stopFlag != STREAM_META_OK_TO_STOP) {
+    //   taosMsleep(100);
+    //   stDebug("vgId:%d wait for meta to stop timer", pMeta->vgId);
+    // }
   }

   stDebug("vgId:%d start to check all tasks", vgId);

@@ -1362,13 +1365,13 @@ SArray* streamMetaSendMsgBeforeCloseTasks(SStreamMeta* pMeta) {
     }

     taosThreadMutexLock(&pTask->lock);
-    char* p = NULL;
-    ETaskStatus s = streamTaskGetStatus(pTask, &p);
-    if (s == TASK_STATUS__CK) {
+    SStreamTaskState* pState = streamTaskGetStatus(pTask);
+    if (pState->state == TASK_STATUS__CK) {
       streamTaskSetCheckpointFailedId(pTask);
       stDebug("s-task:%s mark the checkpoint:%"PRId64" failed", pTask->id.idStr, pTask->chkInfo.checkpointingId);
     } else {
-      stDebug("s-task:%s status:%s not reset the checkpoint", pTask->id.idStr, p);
+      stDebug("s-task:%s status:%s not reset the checkpoint", pTask->id.idStr, pState->name);
     }

     taosThreadMutexUnlock(&pTask->lock);
@@ -1463,13 +1466,16 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) {
     SStreamTask* pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId);
     if (pTask == NULL) {
       stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId);
-      streamMetaUpdateTaskDownstreamStatus(pMeta, pTaskId->streamId, pTaskId->taskId, 0, now, false);
+      streamMetaAddTaskLaunchResult(pMeta, pTaskId->streamId, pTaskId->taskId, 0, now, false);
       continue;
     }

+    // todo: may be we should find the related fill-history task and set it failed.
+
     // fill-history task can only be launched by related stream tasks.
     STaskExecStatisInfo* pInfo = &pTask->execInfo;
     if (pTask->info.fillHistory == 1) {
+      stDebug("s-task:%s fill-history task wait related stream task start", pTask->id.idStr);
       streamMetaReleaseTask(pMeta, pTask);
       continue;
     }

@@ -1481,18 +1487,21 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) {
       streamLaunchFillHistoryTask(pTask);
     }

-    streamMetaUpdateTaskDownstreamStatus(pMeta, pTaskId->streamId, pTaskId->taskId, pInfo->init, pInfo->start, true);
+    streamMetaAddTaskLaunchResult(pMeta, pTaskId->streamId, pTaskId->taskId, pInfo->init, pInfo->start, true);
     streamMetaReleaseTask(pMeta, pTask);
     continue;
   }

-  EStreamTaskEvent event = /*(HAS_RELATED_FILLHISTORY_TASK(pTask)) ? TASK_EVENT_INIT_STREAM_SCANHIST : */TASK_EVENT_INIT;
-  int32_t ret = streamTaskHandleEvent(pTask->status.pSM, event);
+  int32_t ret = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT);
   if (ret != TSDB_CODE_SUCCESS) {
-    stError("vgId:%d failed to handle event:%d", pMeta->vgId, event);
+    stError("vgId:%d failed to handle event:%d", pMeta->vgId, TASK_EVENT_INIT);
     code = ret;

-    streamMetaUpdateTaskDownstreamStatus(pMeta, pTaskId->streamId, pTaskId->taskId, pInfo->init, pInfo->start, false);
+    streamMetaAddTaskLaunchResult(pMeta, pTaskId->streamId, pTaskId->taskId, pInfo->init, pInfo->start, false);
+    if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
+      STaskId* pId = &pTask->hTaskInfo.id;
+      streamMetaAddTaskLaunchResult(pMeta, pId->streamId, pId->taskId, pInfo->init, pInfo->start, false);
+    }
   }

   streamMetaReleaseTask(pMeta, pTask);
@@ -1502,3 +1511,40 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) {
   taosArrayDestroy(pTaskList);
   return code;
 }
+
+int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
+  int32_t vgId = pMeta->vgId;
+  stInfo("vgId:%d start to task:0x%x by checking downstream status", vgId, taskId);
+
+  SStreamTask* pTask = streamMetaAcquireTask(pMeta, streamId, taskId);
+  if (pTask == NULL) {
+    stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, taskId);
+    streamMetaAddTaskLaunchResult(pMeta, streamId, taskId, 0, taosGetTimestampMs(), false);
+    return TSDB_CODE_STREAM_TASK_IVLD_STATUS;
+  }
+
+  // todo: may be we should find the related fill-history task and set it failed.
+
+  // fill-history task can only be launched by related stream tasks.
+  STaskExecStatisInfo* pInfo = &pTask->execInfo;
+  if (pTask->info.fillHistory == 1) {
+    streamMetaReleaseTask(pMeta, pTask);
+    return TSDB_CODE_SUCCESS;
+  }
+
+  ASSERT(pTask->status.downstreamReady == 0);
+
+  int32_t ret = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT);
+  if (ret != TSDB_CODE_SUCCESS) {
+    stError("vgId:%d failed to handle event:%d", pMeta->vgId, TASK_EVENT_INIT);
+
+    streamMetaAddTaskLaunchResult(pMeta, streamId, taskId, pInfo->init, pInfo->start, false);
+    if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
+      STaskId* pId = &pTask->hTaskInfo.id;
+      streamMetaAddTaskLaunchResult(pMeta, pId->streamId, pId->taskId, pInfo->init, pInfo->start, false);
+    }
+  }
+
+  streamMetaReleaseTask(pMeta, pTask);
+  return ret;
+}
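Note: `streamMetaStartOneTask` factors the per-task half of `streamMetaStartAllTasks` into a callable unit, so a single task can be (re)started by id. A hypothetical call site (the surrounding error handling is illustrative, not taken from this commit):

```c
// Hypothetical caller: restart one task after a node update, logging on failure.
int32_t code = streamMetaStartOneTask(pMeta, streamId, taskId);
if (code != TSDB_CODE_SUCCESS) {
  stError("vgId:%d failed to restart stream task:0x%x, code:%s", pMeta->vgId, taskId, tstrerror(code));
}
```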
@@ -17,8 +17,7 @@
 #define MAX_STREAM_EXEC_BATCH_NUM 32
 #define MAX_SMOOTH_BURST_RATIO 5  // 5 sec
-#define WAIT_FOR_DURATION 40
-#define OUTPUT_QUEUE_FULL_WAIT_DURATION 500  // 500 ms
+#define WAIT_FOR_DURATION 10

 // todo refactor:
 // read data from input queue

@@ -148,8 +147,6 @@ const char* streamQueueItemGetTypeStr(int32_t type) {

 int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks,
                                     int32_t* blockSize) {
-  int32_t retryTimes = 0;
-  int32_t MAX_RETRY_TIMES = 5;
   const char* id = pTask->id.idStr;
   int32_t taskLevel = pTask->info.taskLevel;

@@ -157,10 +154,10 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu
   *numOfBlocks = 0;
   *blockSize = 0;

+  // todo remove it
   // no available token in bucket for sink task, let's wait for a little bit
   if (taskLevel == TASK_LEVEL__SINK && (!streamTaskExtractAvailableToken(pTask->outputInfo.pTokenBucket, pTask->id.idStr))) {
     stDebug("s-task:%s no available token in bucket for sink data, wait for 10ms", id);
-    taosMsleep(10);
     return TSDB_CODE_SUCCESS;
   }

@@ -172,10 +169,6 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu

     SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputq.queue);
     if (qItem == NULL) {
-      if ((taskLevel == TASK_LEVEL__SOURCE || taskLevel == TASK_LEVEL__SINK) && (++retryTimes) < MAX_RETRY_TIMES) {
-        taosMsleep(WAIT_FOR_DURATION);
-        continue;
-      }

       // restore the token to bucket
       if (*numOfBlocks > 0) {
@@ -342,23 +335,6 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
 // the result should be put into the outputQ in any cases, the result may be lost otherwise.
 int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBlock) {
   STaosQueue* pQueue = pTask->outputq.queue->pQueue;

-  // wait for the output queue is available for new data to dispatch
-  while (streamQueueIsFull(pTask->outputq.queue)) {
-    if (streamTaskShouldStop(pTask)) {
-      stInfo("s-task:%s discard result block due to task stop", pTask->id.idStr);
-      return TSDB_CODE_STREAM_EXEC_CANCELLED;
-    }
-
-    int32_t total = streamQueueGetNumOfItems(pTask->outputq.queue);
-    double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue));
-    // let's wait for there are enough space to hold this result pBlock
-    stDebug("s-task:%s outputQ is full, wait for %dms and retry, outputQ items:%d, size:%.2fMiB", pTask->id.idStr,
-            OUTPUT_QUEUE_FULL_WAIT_DURATION, total, size);
-
-    taosMsleep(OUTPUT_QUEUE_FULL_WAIT_DURATION);
-  }
-
   int32_t code = taosWriteQitem(pQueue, pBlock);

   int32_t total = streamQueueGetNumOfItems(pTask->outputq.queue);

@@ -366,9 +342,16 @@ int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBloc
   if (code != 0) {
     stError("s-task:%s failed to put res into outputQ, outputQ items:%d, size:%.2fMiB code:%s, result lost",
             pTask->id.idStr, total + 1, size, tstrerror(code));
+  } else {
+    if (streamQueueIsFull(pTask->outputq.queue)) {
+      stWarn(
+          "s-task:%s outputQ is full(outputQ items:%d, size:%.2fMiB), set the output status BLOCKING, wait for 500ms "
+          "after handle this batch of blocks",
+          pTask->id.idStr, total, size);
     } else {
       stDebug("s-task:%s data put into outputQ, outputQ items:%d, size:%.2fMiB", pTask->id.idStr, total, size);
     }
+  }

   return TSDB_CODE_SUCCESS;
 }
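Note: this hunk removes the in-place busy wait on a full output queue: the block is now always written, and a full queue merely reports back-pressure so the scheduler can idle the task after the current batch (the 500 ms figure matches the `setTaskSchedInfo(pTask, 500)` gate added in `doStreamExecTask`). A condensed before/after sketch, not the literal code:

```c
// Before: the producer thread slept inside the put call.
//   while (streamQueueIsFull(q)) { taosMsleep(500); }  // could stall the worker
//   taosWriteQitem(q, pBlock);
//
// After: write first, never block on fullness; back-pressure is deferred to the
// scheduler, which parks the task for ~500ms via setTaskSchedInfo().
int32_t code = taosWriteQitem(pQueue, pBlock);
if (code == 0 && streamQueueIsFull(pTask->outputq.queue)) {
  // output marked BLOCKING; doStreamExecTask reschedules instead of spinning
}
```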
@@ -26,6 +26,7 @@
 typedef struct SLaunchHTaskInfo {
   SStreamMeta* pMeta;
   STaskId id;
+  STaskId hTaskId;
 } SLaunchHTaskInfo;

 typedef struct STaskRecheckInfo {

@@ -43,20 +44,19 @@ typedef struct STaskInitTs {
 static int32_t streamSetParamForScanHistory(SStreamTask* pTask);
 static void streamTaskSetRangeStreamCalc(SStreamTask* pTask);
 static int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated);
-static SLaunchHTaskInfo* createHTaskLaunchInfo(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
+static SLaunchHTaskInfo* createHTaskLaunchInfo(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t hStreamId,
+                                               int32_t hTaskId);
 static void tryLaunchHistoryTask(void* param, void* tmrId);
 static void doProcessDownstreamReadyRsp(SStreamTask* pTask);

 int32_t streamTaskSetReady(SStreamTask* pTask) {
-  char* p = NULL;
   int32_t numOfDowns = streamTaskGetNumOfDownstream(pTask);
-  ETaskStatus status = streamTaskGetStatus(pTask, &p);
+  SStreamTaskState* p = streamTaskGetStatus(pTask);

-  if ((status == TASK_STATUS__SCAN_HISTORY/* || status == TASK_STATUS__STREAM_SCAN_HISTORY*/) &&
-      pTask->info.taskLevel != TASK_LEVEL__SOURCE) {
+  if ((p->state == TASK_STATUS__SCAN_HISTORY) && pTask->info.taskLevel != TASK_LEVEL__SOURCE) {
     pTask->numOfWaitingUpstream = taosArrayGetSize(pTask->upstreamInfo.pList);
     stDebug("s-task:%s level:%d task wait for %d upstream tasks complete scan-history procedure, status:%s",
-            pTask->id.idStr, pTask->info.taskLevel, pTask->numOfWaitingUpstream, p);
+            pTask->id.idStr, pTask->info.taskLevel, pTask->numOfWaitingUpstream, p->name);
   }

   ASSERT(pTask->status.downstreamReady == 0);

@@ -65,7 +65,7 @@ int32_t streamTaskSetReady(SStreamTask* pTask) {
   pTask->execInfo.start = taosGetTimestampMs();
   int64_t el = (pTask->execInfo.start - pTask->execInfo.init);
   stDebug("s-task:%s all %d downstream ready, init completed, elapsed time:%" PRId64 "ms, task status:%s",
-          pTask->id.idStr, numOfDowns, el, p);
+          pTask->id.idStr, numOfDowns, el, p->name);
   return TSDB_CODE_SUCCESS;
 }
@@ -93,12 +93,11 @@ static void doReExecScanhistory(void* param, void* tmrId) {
   SStreamTask* pTask = param;
   pTask->schedHistoryInfo.numOfTicks -= 1;

-  char* p = NULL;
-  ETaskStatus status = streamTaskGetStatus(pTask, &p);
-  if (status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP) {
+  SStreamTaskState* p = streamTaskGetStatus(pTask);
+  if (p->state == TASK_STATUS__DROPPING || p->state == TASK_STATUS__STOP) {
     streamMetaReleaseTask(pTask->pMeta, pTask);
     int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
-    stDebug("s-task:%s status:%s not start scan-history again, ref:%d", pTask->id.idStr, p, ref);
+    stDebug("s-task:%s status:%s not start scan-history again, ref:%d", pTask->id.idStr, p->name, ref);
     return;
   }

@@ -155,10 +154,9 @@ static int32_t doStartScanHistoryTask(SStreamTask* pTask) {

 int32_t streamTaskStartScanHistory(SStreamTask* pTask) {
   int32_t level = pTask->info.taskLevel;
-  ETaskStatus status = streamTaskGetStatus(pTask, NULL);
+  ETaskStatus status = streamTaskGetStatus(pTask)->state;

-  ASSERT(pTask->status.downstreamReady == 1 &&
-         ((status == TASK_STATUS__SCAN_HISTORY)/* || (status == TASK_STATUS__STREAM_SCAN_HISTORY)*/));
+  ASSERT((pTask->status.downstreamReady == 1) && (status == TASK_STATUS__SCAN_HISTORY));

   if (level == TASK_LEVEL__SOURCE) {
     return doStartScanHistoryTask(pTask);
@@ -315,7 +313,7 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_
           id, upstreamTaskId, vgId, stage, pInfo->stage);
       // record the checkpoint failure id and sent to mnode
       taosThreadMutexLock(&pTask->lock);
-      ETaskStatus status = streamTaskGetStatus(pTask, NULL);
+      ETaskStatus status = streamTaskGetStatus(pTask)->state;
       if (status == TASK_STATUS__CK) {
         streamTaskSetCheckpointFailedId(pTask);
       }

@@ -325,7 +323,7 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_
   if (pInfo->stage != stage) {

     taosThreadMutexLock(&pTask->lock);
-    ETaskStatus status = streamTaskGetStatus(pTask, NULL);
+    ETaskStatus status = streamTaskGetStatus(pTask)->state;
    if (status == TASK_STATUS__CK) {
      streamTaskSetCheckpointFailedId(pTask);
    }

@@ -346,20 +344,20 @@ int32_t streamTaskOnNormalTaskReady(SStreamTask* pTask) {
   streamTaskSetReady(pTask);
   streamTaskSetRangeStreamCalc(pTask);

-  char* p = NULL;
-  ETaskStatus status = streamTaskGetStatus(pTask, &p);
-  ASSERT(status == TASK_STATUS__READY);
+  SStreamTaskState* p = streamTaskGetStatus(pTask);
+  ASSERT(p->state == TASK_STATUS__READY);

+  int8_t schedStatus = pTask->status.schedStatus;
   if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
     int64_t startVer = walReaderGetCurrentVer(pTask->exec.pWalReader);
     if (startVer == -1) {
       startVer = pTask->chkInfo.nextProcessVer;
     }

-    stDebug("s-task:%s no need to scan-history data, status:%s, sched-status:%d, ready for data from wal ver:%" PRId64,
-            id, p, pTask->status.schedStatus, startVer);
+    stDebug("s-task:%s status:%s, sched-status:%d, ready for data from wal ver:%" PRId64, id, p->name, schedStatus,
+            startVer);
   } else {
-    stDebug("s-task:%s level:%d status:%s sched-status:%d", id, pTask->info.taskLevel, p, pTask->status.schedStatus);
+    stDebug("s-task:%s level:%d status:%s sched-status:%d", id, pTask->info.taskLevel, p->name, schedStatus);
   }

   return TSDB_CODE_SUCCESS;
@@ -372,15 +370,14 @@ int32_t streamTaskOnScanhistoryTaskReady(SStreamTask* pTask) {
   streamTaskSetReady(pTask);
   streamTaskSetRangeStreamCalc(pTask);

-  char* p = NULL;
-  ETaskStatus status = streamTaskGetStatus(pTask, &p);
-  ASSERT(status == TASK_STATUS__SCAN_HISTORY/* || status == TASK_STATUS__STREAM_SCAN_HISTORY*/);
+  SStreamTaskState* p = streamTaskGetStatus(pTask);
+  ASSERT(p->state == TASK_STATUS__SCAN_HISTORY);

   if (pTask->info.fillHistory == 1) {
-    stDebug("s-task:%s fill-history task enters into scan-history data stage, status:%s", id, p);
+    stDebug("s-task:%s fill-history task enters into scan-history data stage, status:%s", id, p->name);
     streamTaskStartScanHistory(pTask);
   } else {
-    stDebug("s-task:%s scan wal data, status:%s", id, p);
+    stDebug("s-task:%s scan wal data, status:%s", id, p->name);
   }

   // NOTE: there will be an deadlock if launch fill history here.

@@ -393,18 +390,13 @@ int32_t streamTaskOnScanhistoryTaskReady(SStreamTask* pTask) {
 }

 void doProcessDownstreamReadyRsp(SStreamTask* pTask) {
-  EStreamTaskEvent event;
-  if (pTask->info.fillHistory == 0) {
-    event = /*HAS_RELATED_FILLHISTORY_TASK(pTask) ? TASK_EVENT_INIT_STREAM_SCANHIST : */TASK_EVENT_INIT;
-  } else {
-    event = TASK_EVENT_INIT_SCANHIST;
-  }
+  EStreamTaskEvent event = (pTask->info.fillHistory == 0) ? TASK_EVENT_INIT : TASK_EVENT_INIT_SCANHIST;

   streamTaskOnHandleEventSuccess(pTask->status.pSM, event);

   int64_t initTs = pTask->execInfo.init;
   int64_t startTs = pTask->execInfo.start;
-  streamMetaUpdateTaskDownstreamStatus(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, initTs, startTs, true);
+  streamMetaAddTaskLaunchResult(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, initTs, startTs, true);

   // start the related fill-history task, when current task is ready
   // not invoke in success callback due to the deadlock.

@@ -500,22 +492,14 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
       addIntoNodeUpdateList(pTask, pRsp->downstreamNodeId);
     }

-    streamMetaUpdateTaskDownstreamStatus(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, pTask->execInfo.init,
-                                         taosGetTimestampMs(), false);
+    int32_t startTs = pTask->execInfo.init;
+    int64_t now = taosGetTimestampMs();
+    streamMetaAddTaskLaunchResult(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, startTs, now, false);

     // automatically set the related fill-history task to be failed.
     if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
       STaskId* pId = &pTask->hTaskInfo.id;
-      int64_t current = taosGetTimestampMs();
-
-      SStreamTask* pHTask = streamMetaAcquireTask(pTask->pMeta, pId->streamId, pId->taskId);
-      if (pHTask != NULL) {
-        streamMetaUpdateTaskDownstreamStatus(pTask->pMeta, pId->streamId, pId->taskId, pHTask->execInfo.init, current,
-                                             false);
-        streamMetaReleaseTask(pTask->pMeta, pHTask);
-      } else {
-        streamMetaUpdateTaskDownstreamStatus(pTask->pMeta, pId->streamId, pId->taskId, 0, current, false);
-      }
+      streamMetaAddTaskLaunchResult(pTask->pMeta, pId->streamId, pId->taskId, startTs, now, false);
     }
   } else {  // TASK_DOWNSTREAM_NOT_READY, let's retry in 100ms
     STaskRecheckInfo* pInfo = createRecheckInfo(pTask, pRsp);
@@ -562,6 +546,15 @@ int32_t streamSetParamForScanHistory(SStreamTask* pTask) {
   return qSetStreamOperatorOptionForScanHistory(pTask->exec.pExecutor);
 }

+int32_t streamResetParamForScanHistory(SStreamTask* pTask) {
+  stDebug("s-task:%s reset operator option for scan-history data", pTask->id.idStr);
+  if (pTask->exec.pExecutor != NULL) {
+    return qResetStreamOperatorOptionForScanHistory(pTask->exec.pExecutor);
+  } else {
+    return TSDB_CODE_SUCCESS;
+  }
+}
+
 int32_t streamRestoreParam(SStreamTask* pTask) {
   stDebug("s-task:%s restore operator param after scan-history", pTask->id.idStr);
   return qRestoreStreamOperatorOption(pTask->exec.pExecutor);

@@ -632,12 +625,11 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory
   ASSERT(taskLevel == TASK_LEVEL__AGG || taskLevel == TASK_LEVEL__SINK);

   const char* id = pTask->id.idStr;
-  char* p = NULL;
-  ETaskStatus status = streamTaskGetStatus(pTask, &p);
+  SStreamTaskState* p = streamTaskGetStatus(pTask);

-  if (status != TASK_STATUS__SCAN_HISTORY /*&& status != TASK_STATUS__STREAM_SCAN_HISTORY*/) {
-    stError("s-task:%s not in scan-history status, status:%s return upstream:0x%x scan-history finish directly", id, p,
-            pReq->upstreamTaskId);
+  if (p->state != TASK_STATUS__SCAN_HISTORY) {
+    stError("s-task:%s not in scan-history status, status:%s return upstream:0x%x scan-history finish directly", id,
+            p->name, pReq->upstreamTaskId);

     void* pBuf = NULL;
     int32_t len = 0;

@@ -690,7 +682,7 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory
 }

 int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask) {
-  ETaskStatus status = streamTaskGetStatus(pTask, NULL);
+  ETaskStatus status = streamTaskGetStatus(pTask)->state;

   // task restart now, not handle the scan-history finish rsp
   if (status == TASK_STATUS__UNINIT) {

@@ -738,31 +730,79 @@ static void checkFillhistoryTaskStatus(SStreamTask* pTask, SStreamTask* pHTask)
   streamTaskHandleEvent(pHTask->status.pSM, TASK_EVENT_INIT_SCANHIST);
 }

+static void noRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, int64_t now) {
+  SStreamMeta* pMeta = pTask->pMeta;
+  SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo;
+
+  int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+  streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false);
+
+  stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x, ref:%d",
+          pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId, ref);
+
+  pHTaskInfo->id.taskId = 0;
|
||||||
|
pHTaskInfo->id.streamId = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void doRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, int64_t now) {
|
||||||
|
SStreamMeta* pMeta = pTask->pMeta;
|
||||||
|
SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo;
|
||||||
|
|
||||||
|
if (streamTaskShouldStop(pTask)) { // record the failure
|
||||||
|
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
|
||||||
|
stDebug("s-task:0x%" PRIx64 " stopped, not launch rel history task:0x%" PRIx64 ", ref:%d", pInfo->id.taskId,
|
||||||
|
pInfo->hTaskId.taskId, ref);
|
||||||
|
|
||||||
|
streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false);
|
||||||
|
taosMemoryFree(pInfo);
|
||||||
|
} else {
|
||||||
|
char* p = streamTaskGetStatus(pTask)->name;
|
||||||
|
int32_t hTaskId = pHTaskInfo->id.taskId;
|
||||||
|
|
||||||
|
stDebug("s-task:%s status:%s failed to launch fill-history task:0x%x, retry launch:%dms, retryCount:%d",
|
||||||
|
pTask->id.idStr, p, hTaskId, pHTaskInfo->waitInterval, pHTaskInfo->retryTimes);
|
||||||
|
|
||||||
|
taosTmrReset(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamTimer, &pHTaskInfo->pTimer);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static void tryLaunchHistoryTask(void* param, void* tmrId) {
|
static void tryLaunchHistoryTask(void* param, void* tmrId) {
|
||||||
SLaunchHTaskInfo* pInfo = param;
|
SLaunchHTaskInfo* pInfo = param;
|
||||||
SStreamMeta* pMeta = pInfo->pMeta;
|
SStreamMeta* pMeta = pInfo->pMeta;
|
||||||
|
int64_t now = taosGetTimestampMs();
|
||||||
|
|
||||||
streamMetaWLock(pMeta);
|
streamMetaWLock(pMeta);
|
||||||
|
|
||||||
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pInfo->id, sizeof(pInfo->id));
|
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pInfo->id, sizeof(pInfo->id));
|
||||||
if (ppTask) {
|
if (ppTask == NULL || *ppTask == NULL) {
|
||||||
ASSERT((*ppTask)->status.timerActive >= 1);
|
stError("s-task:0x%x and rel fill-history task:0x%" PRIx64 " all have been destroyed, not launch",
|
||||||
|
(int32_t)pInfo->id.taskId, pInfo->hTaskId.taskId);
|
||||||
|
streamMetaWUnLock(pMeta);
|
||||||
|
|
||||||
|
// already dropped, no need to set the failure info into the stream task meta.
|
||||||
|
taosMemoryFree(pInfo);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
if (streamTaskShouldStop(*ppTask)) {
|
if (streamTaskShouldStop(*ppTask)) {
|
||||||
char* p = NULL;
|
ASSERT((*ppTask)->status.timerActive >= 1);
|
||||||
streamTaskGetStatus((*ppTask), &p);
|
|
||||||
|
char* p = streamTaskGetStatus(*ppTask)->name;
|
||||||
int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1);
|
int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1);
|
||||||
stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d, ref:%d",
|
stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d, ref:%d",
|
||||||
(*ppTask)->id.idStr, p, (*ppTask)->hTaskInfo.retryTimes, ref);
|
(*ppTask)->id.idStr, p, (*ppTask)->hTaskInfo.retryTimes, ref);
|
||||||
|
|
||||||
taosMemoryFree(pInfo);
|
|
||||||
streamMetaWUnLock(pMeta);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
streamMetaWUnLock(pMeta);
|
streamMetaWUnLock(pMeta);
|
||||||
|
|
||||||
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pInfo->id.streamId, pInfo->id.taskId);
|
// record the related fill-history task failed
|
||||||
|
streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false);
|
||||||
|
taosMemoryFree(pInfo);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
SStreamTask* pTask = streamMetaAcquireTaskNoLock(pMeta, pInfo->id.streamId, pInfo->id.taskId);
|
||||||
|
streamMetaWUnLock(pMeta);
|
||||||
|
|
||||||
if (pTask != NULL) {
|
if (pTask != NULL) {
|
||||||
SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo;
|
SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo;
|
||||||
|
|
||||||
|
@ -774,119 +814,155 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pHTaskInfo->retryTimes > MAX_RETRY_LAUNCH_HISTORY_TASK) {
|
if (pHTaskInfo->retryTimes > MAX_RETRY_LAUNCH_HISTORY_TASK) {
|
||||||
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
|
noRetryLaunchFillHistoryTask(pTask, pInfo, now);
|
||||||
streamMetaReleaseTask(pMeta, pTask);
|
|
||||||
|
|
||||||
stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x, ref:%d",
|
|
||||||
pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId, ref);
|
|
||||||
|
|
||||||
pHTaskInfo->id.taskId = 0;
|
|
||||||
pHTaskInfo->id.streamId = 0;
|
|
||||||
} else { // not reach the limitation yet, let's continue retrying launch related fill-history task.
|
} else { // not reach the limitation yet, let's continue retrying launch related fill-history task.
|
||||||
streamTaskSetRetryInfoForLaunch(pHTaskInfo);
|
streamTaskSetRetryInfoForLaunch(pHTaskInfo);
|
||||||
ASSERT(pTask->status.timerActive >= 1);
|
ASSERT(pTask->status.timerActive >= 1);
|
||||||
|
|
||||||
// abort the timer if intend to stop task
|
// abort the timer if intend to stop task
|
||||||
SStreamTask* pHTask = streamMetaAcquireTask(pMeta, pHTaskInfo->id.streamId, pHTaskInfo->id.taskId);
|
SStreamTask* pHTask = streamMetaAcquireTask(pMeta, pHTaskInfo->id.streamId, pHTaskInfo->id.taskId);
|
||||||
if (pHTask == NULL && (!streamTaskShouldStop(pTask))) {
|
if (pHTask == NULL) {
|
||||||
char* p = NULL;
|
doRetryLaunchFillHistoryTask(pTask, pInfo, now);
|
||||||
int32_t hTaskId = pHTaskInfo->id.taskId;
|
|
||||||
|
|
||||||
streamTaskGetStatus(pTask, &p);
|
|
||||||
stDebug("s-task:%s status:%s failed to launch fill-history task:0x%x, retry launch:%dms, retryCount:%d",
|
|
||||||
pTask->id.idStr, p, hTaskId, pHTaskInfo->waitInterval, pHTaskInfo->retryTimes);
|
|
||||||
|
|
||||||
taosTmrReset(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamTimer, &pHTaskInfo->pTimer);
|
|
||||||
streamMetaReleaseTask(pMeta, pTask);
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
return;
|
return;
|
||||||
}
|
} else {
|
||||||
|
|
||||||
if (pHTask != NULL) {
|
|
||||||
checkFillhistoryTaskStatus(pTask, pHTask);
|
checkFillhistoryTaskStatus(pTask, pHTask);
|
||||||
streamMetaReleaseTask(pMeta, pHTask);
|
streamMetaReleaseTask(pMeta, pHTask);
|
||||||
}
|
|
||||||
|
|
||||||
// not in timer anymore
|
// not in timer anymore
|
||||||
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
|
int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
|
||||||
stDebug("s-task:0x%x fill-history task launch completed, retry times:%d, ref:%d", (int32_t)pInfo->id.taskId,
|
stDebug("s-task:0x%x fill-history task launch completed, retry times:%d, ref:%d", (int32_t)pInfo->id.taskId,
|
||||||
pHTaskInfo->retryTimes, ref);
|
pHTaskInfo->retryTimes, ref);
|
||||||
streamMetaReleaseTask(pMeta, pTask);
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
streamMetaReleaseTask(pMeta, pTask);
|
||||||
} else {
|
} else {
|
||||||
stError("s-task:0x%x failed to load task, it may have been destroyed, not launch related fill-history task",
|
streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false);
|
||||||
(int32_t)pInfo->id.taskId);
|
|
||||||
|
int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1);
|
||||||
|
stError("s-task:0x%x rel fill-history task:0x%" PRIx64 " may have been destroyed, not launch, ref:%d",
|
||||||
|
(int32_t)pInfo->id.taskId, pInfo->hTaskId.taskId, ref);
|
||||||
}
|
}
|
||||||
|
|
||||||
taosMemoryFree(pInfo);
|
taosMemoryFree(pInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
SLaunchHTaskInfo* createHTaskLaunchInfo(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
|
SLaunchHTaskInfo* createHTaskLaunchInfo(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t hStreamId,
|
||||||
|
int32_t hTaskId) {
|
||||||
SLaunchHTaskInfo* pInfo = taosMemoryCalloc(1, sizeof(SLaunchHTaskInfo));
|
SLaunchHTaskInfo* pInfo = taosMemoryCalloc(1, sizeof(SLaunchHTaskInfo));
|
||||||
if (pInfo == NULL) {
|
if (pInfo == NULL) {
|
||||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
pInfo->id.taskId = taskId;
|
|
||||||
pInfo->id.streamId = streamId;
|
pInfo->id.streamId = streamId;
|
||||||
|
pInfo->id.taskId = taskId;
|
||||||
|
|
||||||
|
pInfo->hTaskId.streamId = hStreamId;
|
||||||
|
pInfo->hTaskId.taskId = hTaskId;
|
||||||
|
|
||||||
pInfo->pMeta = pMeta;
|
pInfo->pMeta = pMeta;
|
||||||
return pInfo;
|
return pInfo;
|
||||||
}
|
}
|
||||||
|
|
||||||
// an fill history task needs to be started.
|
static int32_t launchNotBuiltFillHistoryTask(SStreamTask* pTask) {
|
||||||
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) {
|
|
||||||
SStreamMeta* pMeta = pTask->pMeta;
|
SStreamMeta* pMeta = pTask->pMeta;
|
||||||
|
STaskExecStatisInfo* pExecInfo = &pTask->execInfo;
|
||||||
|
const char* idStr = pTask->id.idStr;
|
||||||
|
int64_t hStreamId = pTask->hTaskInfo.id.streamId;
|
||||||
int32_t hTaskId = pTask->hTaskInfo.id.taskId;
|
int32_t hTaskId = pTask->hTaskInfo.id.taskId;
|
||||||
|
ASSERT(hTaskId != 0);
|
||||||
|
|
||||||
ASSERT((hTaskId != 0) && (pTask->status.downstreamReady == 1));
|
stWarn("s-task:%s vgId:%d failed to launch history task:0x%x, since not built yet", idStr, pMeta->vgId, hTaskId);
|
||||||
stDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", pTask->id.idStr,
|
|
||||||
pTask->hTaskInfo.id.streamId, hTaskId);
|
|
||||||
|
|
||||||
// Set the execute conditions, including the query time window and the version range
|
SLaunchHTaskInfo* pInfo = createHTaskLaunchInfo(pMeta, pTask->id.streamId, pTask->id.taskId, hStreamId, hTaskId);
|
||||||
SStreamTask** pHTask = taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
|
|
||||||
if (pHTask == NULL) {
|
|
||||||
stWarn("s-task:%s vgId:%d failed to launch history task:0x%x, since not built yet", pTask->id.idStr, pMeta->vgId,
|
|
||||||
hTaskId);
|
|
||||||
|
|
||||||
SLaunchHTaskInfo* pInfo = createHTaskLaunchInfo(pTask->pMeta, pTask->id.streamId, pTask->id.taskId);
|
|
||||||
if (pInfo == NULL) {
|
if (pInfo == NULL) {
|
||||||
stError("s-task:%s failed to launch related fill-history task, since Out Of Memory", pTask->id.idStr);
|
stError("s-task:%s failed to launch related fill-history task, since Out Of Memory", idStr);
|
||||||
|
|
||||||
|
streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->init, pExecInfo->start, false);
|
||||||
return terrno;
|
return terrno;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// set the launch time info
|
||||||
streamTaskInitForLaunchHTask(&pTask->hTaskInfo);
|
streamTaskInitForLaunchHTask(&pTask->hTaskInfo);
|
||||||
|
|
||||||
|
// check for the timer
|
||||||
if (pTask->hTaskInfo.pTimer == NULL) {
|
if (pTask->hTaskInfo.pTimer == NULL) {
|
||||||
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
|
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
|
||||||
pTask->hTaskInfo.pTimer = taosTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer);
|
pTask->hTaskInfo.pTimer = taosTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer);
|
||||||
|
|
||||||
if (pTask->hTaskInfo.pTimer == NULL) {
|
if (pTask->hTaskInfo.pTimer == NULL) {
|
||||||
atomic_sub_fetch_32(&pTask->status.timerActive, 1);
|
ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
|
||||||
stError("s-task:%s failed to start timer, related fill-history task not launched, ref:%d", pTask->id.idStr,
|
stError("s-task:%s failed to start timer, related fill-history task not launched, ref:%d", idStr, ref);
|
||||||
pTask->status.timerActive);
|
|
||||||
taosMemoryFree(pInfo);
|
taosMemoryFree(pInfo);
|
||||||
} else {
|
streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->init, pExecInfo->start, false);
|
||||||
ASSERT(ref >= 1);
|
return terrno;
|
||||||
stDebug("s-task:%s set timer active flag, ref:%d", pTask->id.idStr, ref);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ASSERT(ref >= 1);
|
||||||
|
|
||||||
|
stDebug("s-task:%s set timer active flag, ref:%d", idStr, ref);
|
||||||
} else { // timer exists
|
} else { // timer exists
|
||||||
ASSERT(pTask->status.timerActive >= 1);
|
ASSERT(pTask->status.timerActive >= 1);
|
||||||
stDebug("s-task:%s set timer active flag, task timer not null", pTask->id.idStr);
|
stDebug("s-task:%s set timer active flag, task timer not null", idStr);
|
||||||
taosTmrReset(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer, &pTask->hTaskInfo.pTimer);
|
taosTmrReset(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer, &pTask->hTaskInfo.pTimer);
|
||||||
}
|
}
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((*pHTask)->status.downstreamReady == 1) {
|
// an fill history task needs to be started.
|
||||||
stDebug("s-task:%s fill-history task is ready, no need to check downstream", (*pHTask)->id.idStr);
|
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) {
|
||||||
|
SStreamMeta* pMeta = pTask->pMeta;
|
||||||
|
STaskExecStatisInfo* pExecInfo = &pTask->execInfo;
|
||||||
|
const char* idStr = pTask->id.idStr;
|
||||||
|
int64_t hStreamId = pTask->hTaskInfo.id.streamId;
|
||||||
|
int32_t hTaskId = pTask->hTaskInfo.id.taskId;
|
||||||
|
ASSERT(hTaskId != 0);
|
||||||
|
|
||||||
|
// check stream task status in the first place.
|
||||||
|
SStreamTaskState* pStatus = streamTaskGetStatus(pTask);
|
||||||
|
if (pStatus->state != TASK_STATUS__READY) {
|
||||||
|
stDebug("s-task:%s not launch related fill-history task:0x%" PRIx64 "-0x%x, status:%s", idStr, hStreamId, hTaskId,
|
||||||
|
pStatus->name);
|
||||||
|
|
||||||
|
streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->init, pExecInfo->start, false);
|
||||||
|
return -1; // todo set the correct error code
|
||||||
|
}
|
||||||
|
|
||||||
|
stDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", idStr, hStreamId, hTaskId);
|
||||||
|
|
||||||
|
// Set the execute conditions, including the query time window and the version range
|
||||||
|
streamMetaRLock(pMeta);
|
||||||
|
SStreamTask** pHTask = taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
|
||||||
|
streamMetaRUnLock(pMeta);
|
||||||
|
|
||||||
|
if (pHTask != NULL) { // it is already added into stream meta store.
|
||||||
|
SStreamTask* pHisTask = streamMetaAcquireTask(pMeta, hStreamId, hTaskId);
|
||||||
|
if (pHisTask == NULL) {
|
||||||
|
stDebug("s-task:%s failed acquire and start fill-history task, it may have been dropped/stopped", idStr);
|
||||||
|
streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->init, pExecInfo->start, false);
|
||||||
} else {
|
} else {
|
||||||
checkFillhistoryTaskStatus(pTask, *pHTask);
|
if (pHisTask->status.downstreamReady == 1) { // it's ready now, do nothing
|
||||||
|
stDebug("s-task:%s fill-history task is ready, no need to check downstream", pHisTask->id.idStr);
|
||||||
|
streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->init, pExecInfo->start, true);
|
||||||
|
} else { // exist, but not ready, continue check downstream task status
|
||||||
|
checkFillhistoryTaskStatus(pTask, pHisTask);
|
||||||
|
}
|
||||||
|
|
||||||
|
streamMetaReleaseTask(pMeta, pHisTask);
|
||||||
}
|
}
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
|
} else {
|
||||||
|
return launchNotBuiltFillHistoryTask(pTask);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask) {
|
int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask) {
|
||||||
if (streamTaskGetStatus(pTask, NULL) == TASK_STATUS__DROPPING) {
|
if (streamTaskGetStatus(pTask)->state == TASK_STATUS__DROPPING) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1029,6 +1105,11 @@ void streamTaskSetRangeStreamCalc(SStreamTask* pTask) {
|
||||||
pTask->id.idStr, pRange->window.skey, pRange->window.ekey, pRange->range.minVer, pRange->range.maxVer);
|
pTask->id.idStr, pRange->window.skey, pRange->window.ekey, pRange->range.minVer, pRange->range.maxVer);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
ASSERT(pTask->info.fillHistory == 0);
|
||||||
|
if (pTask->info.taskLevel >= TASK_LEVEL__AGG) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
int64_t ekey = 0;
|
int64_t ekey = 0;
|
||||||
if (pRange->window.ekey < INT64_MAX) {
|
if (pRange->window.ekey < INT64_MAX) {
|
||||||
ekey = pRange->window.ekey + 1;
|
ekey = pRange->window.ekey + 1;
|
||||||
|
@ -1043,10 +1124,13 @@ void streamTaskSetRangeStreamCalc(SStreamTask* pTask) {
|
||||||
pRange->range.minVer = 0;
|
pRange->range.minVer = 0;
|
||||||
pRange->range.maxVer = ver;
|
pRange->range.maxVer = ver;
|
||||||
|
|
||||||
stDebug("s-task:%s level:%d related fill-history task exists, update stream calc time window:%" PRId64 " - %" PRId64
|
stDebug("s-task:%s level:%d related fill-history task exists, set stream task timeWindow:%" PRId64 " - %" PRId64
|
||||||
", verRang:%" PRId64 " - %" PRId64,
|
", verRang:%" PRId64 " - %" PRId64,
|
||||||
pTask->id.idStr, pTask->info.taskLevel, pRange->window.skey, pRange->window.ekey, pRange->range.minVer,
|
pTask->id.idStr, pTask->info.taskLevel, pRange->window.skey, pRange->window.ekey, ver, INT64_MAX);
|
||||||
pRange->range.maxVer);
|
|
||||||
|
SVersionRange verRange = {.minVer = ver, .maxVer = INT64_MAX};
|
||||||
|
STimeWindow win = pRange->window;
|
||||||
|
streamSetParamForStreamScannerStep2(pTask, &verRange, &win);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1066,23 +1150,21 @@ void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void streamTaskResume(SStreamTask* pTask) {
|
void streamTaskResume(SStreamTask* pTask) {
|
||||||
char* p = NULL;
|
SStreamTaskState prevState = *streamTaskGetStatus(pTask);
|
||||||
ETaskStatus status = streamTaskGetStatus(pTask, &p);
|
|
||||||
SStreamMeta* pMeta = pTask->pMeta;
|
SStreamMeta* pMeta = pTask->pMeta;
|
||||||
|
|
||||||
if (status == TASK_STATUS__PAUSE || status == TASK_STATUS__HALT) {
|
if (prevState.state == TASK_STATUS__PAUSE || prevState.state == TASK_STATUS__HALT) {
|
||||||
streamTaskRestoreStatus(pTask);
|
streamTaskRestoreStatus(pTask);
|
||||||
|
|
||||||
char* pNew = NULL;
|
char* pNew = streamTaskGetStatus(pTask)->name;
|
||||||
streamTaskGetStatus(pTask, &pNew);
|
if (prevState.state == TASK_STATUS__PAUSE) {
|
||||||
if (status == TASK_STATUS__PAUSE) {
|
|
||||||
int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1);
|
int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1);
|
||||||
stInfo("s-task:%s status:%s resume from %s, paused task(s):%d", pTask->id.idStr, pNew, p, num);
|
stInfo("s-task:%s status:%s resume from %s, paused task(s):%d", pTask->id.idStr, pNew, prevState.name, num);
|
||||||
} else {
|
} else {
|
||||||
stInfo("s-task:%s status:%s resume from %s", pTask->id.idStr, pNew, p);
|
stInfo("s-task:%s status:%s resume from %s", pTask->id.idStr, pNew, prevState.name);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
stDebug("s-task:%s status:%s not in pause/halt status, no need to resume", pTask->id.idStr, p);
|
stDebug("s-task:%s status:%s not in pause/halt status, no need to resume", pTask->id.idStr, prevState.name);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1108,7 +1190,7 @@ static void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t streamMetaUpdateTaskDownstreamStatus(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
|
int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
|
||||||
int64_t endTs, bool ready) {
|
int64_t endTs, bool ready) {
|
||||||
STaskStartInfo* pStartInfo = &pMeta->startInfo;
|
STaskStartInfo* pStartInfo = &pMeta->startInfo;
|
||||||
STaskId id = {.streamId = streamId, .taskId = taskId};
|
STaskId id = {.streamId = streamId, .taskId = taskId};
|
||||||
|
|
|
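The launch rework above routes every outcome of starting a related fill-history task, ready, failed, task stopped, or task already destroyed, through a single streamMetaAddTaskLaunchResult() call, while the retry timer holds a reference through status.timerActive. A minimal self-contained sketch of that bookkeeping pattern, with all names as hypothetical stand-ins rather than the TDengine API:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

// Miniature of the launch bookkeeping: every exit path drops the timer
// reference exactly once and records exactly one launch result.
typedef struct {
  atomic_int timerActive;
  int        retryTimes;
  int        maxRetry;
} MiniTask;

static void addLaunchResult(MiniTask* t, bool ok) {
  (void)t;
  printf("launch result recorded: %s\n", ok ? "ready" : "failed");
}

// returns true if the timer should be re-armed for another attempt
static bool onLaunchTimer(MiniTask* t, bool built, bool stopping) {
  if (stopping || (!built && t->retryTimes >= t->maxRetry)) {
    atomic_fetch_sub(&t->timerActive, 1);  // leaving the timer: drop the ref
    addLaunchResult(t, false);             // record the failure exactly once
    return false;
  }
  if (built) {
    atomic_fetch_sub(&t->timerActive, 1);  // done: drop the ref
    addLaunchResult(t, true);
    return false;
  }
  t->retryTimes++;  // keep the ref while the timer stays armed
  return true;
}

int main(void) {
  MiniTask t = {.timerActive = 1, .retryTimes = 0, .maxRetry = 3};
  while (onLaunchTimer(&t, /*built=*/t.retryTimes == 2, /*stopping=*/false)) {
  }
  return 0;
}
```

The invariant the diff is careful to preserve is exactly this: one reference drop and one recorded result per launch attempt, so the meta's start-info accounting can complete even when the fill-history task never comes up.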
@@ -309,7 +309,9 @@ void tFreeStreamTask(SStreamTask* pTask) {
   ETaskStatus status1 = TASK_STATUS__UNINIT;
   taosThreadMutexLock(&pTask->lock);
   if (pTask->status.pSM != NULL) {
-    status1 = streamTaskGetStatus(pTask, &p);
+    SStreamTaskState* pStatus = streamTaskGetStatus(pTask);
+    p = pStatus->name;
+    status1 = pStatus->state;
   }
   taosThreadMutexUnlock(&pTask->lock);
 

@@ -328,9 +330,9 @@ void tFreeStreamTask(SStreamTask* pTask) {
     taosMsleep(100);
   }
 
-  if (pTask->schedInfo.pTimer != NULL) {
-    taosTmrStop(pTask->schedInfo.pTimer);
-    pTask->schedInfo.pTimer = NULL;
+  if (pTask->schedInfo.pDelayTimer != NULL) {
+    taosTmrStop(pTask->schedInfo.pDelayTimer);
+    pTask->schedInfo.pDelayTimer = NULL;
   }
 
   if (pTask->hTaskInfo.pTimer != NULL) {
@@ -38,7 +38,6 @@ SStreamEventInfo StreamTaskEventList[12] = {
     {.event = 0, .name = ""},  // dummy event, place holder
     {.event = TASK_EVENT_INIT, .name = "initialize"},
     {.event = TASK_EVENT_INIT_SCANHIST, .name = "scan-history-init"},
-    // {.event = TASK_EVENT_INIT_STREAM_SCANHIST, .name = "stream-scan-history-init"},
     {.event = TASK_EVENT_SCANHIST_DONE, .name = "scan-history-completed"},
     {.event = TASK_EVENT_STOP, .name = "stopping"},
     {.event = TASK_EVENT_GEN_CHECKPOINT, .name = "checkpoint"},

@@ -65,8 +64,7 @@ static STaskStateTrans createStateTransform(ETaskStatus current, ETaskStatus nex
 static int32_t dummyFn(SStreamTask* UNUSED_PARAM(p)) { return TSDB_CODE_SUCCESS; }
 
 static int32_t attachEvent(SStreamTask* pTask, SAttachedEventInfo* pEvtInfo) {
-  char* p = NULL;
-  streamTaskGetStatus(pTask, &p);
+  char* p = streamTaskGetStatus(pTask)->name;
 
   stDebug("s-task:%s status:%s attach event:%s required status:%s, since not allowed to handle it", pTask->id.idStr, p,
           GET_EVT_NAME(pEvtInfo->event), StreamTaskStatusList[pEvtInfo->status].name);

@@ -110,12 +108,12 @@ int32_t streamTaskKeepCurrentVerInWal(SStreamTask* pTask) {
 
 // todo check rsp code for handle Event:TASK_EVENT_SCANHIST_DONE
 static bool isInvalidStateTransfer(ETaskStatus state, const EStreamTaskEvent event) {
-  if (/*event == TASK_EVENT_INIT_STREAM_SCANHIST || */event == TASK_EVENT_INIT || event == TASK_EVENT_INIT_SCANHIST) {
+  if (event == TASK_EVENT_INIT || event == TASK_EVENT_INIT_SCANHIST) {
     return (state != TASK_STATUS__UNINIT);
   }
 
   if (event == TASK_EVENT_SCANHIST_DONE) {
-    return (state != TASK_STATUS__SCAN_HISTORY/* && state != TASK_STATUS__STREAM_SCAN_HISTORY*/);
+    return (state != TASK_STATUS__SCAN_HISTORY);
   }
 
   if (event == TASK_EVENT_GEN_CHECKPOINT) {

@@ -275,7 +273,7 @@ static int32_t doHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event, STaskSt
   while (1) {
     // wait for the task to be here
     taosThreadMutexLock(&pTask->lock);
-    ETaskStatus s = streamTaskGetStatus(pTask, NULL);
+    ETaskStatus s = streamTaskGetStatus(pTask)->state;
     taosThreadMutexUnlock(&pTask->lock);
 
     if ((s == pTrans->next.state) && (pSM->prev.evt == pTrans->event)) {  // this event has been handled already

@@ -400,12 +398,8 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
   return TSDB_CODE_SUCCESS;
 }
 
-ETaskStatus streamTaskGetStatus(const SStreamTask* pTask, char** pStr) {
-  SStreamTaskState s = pTask->status.pSM->current;  // copy one obj in case of multi-thread environment
-  if (pStr != NULL) {
-    *pStr = s.name;
-  }
-  return s.state;
+SStreamTaskState* streamTaskGetStatus(const SStreamTask* pTask) {
+  return &pTask->status.pSM->current;  // copy one obj in case of multi-thread environment
 }
 
 const char* streamTaskGetStatusStr(ETaskStatus status) {

@@ -482,14 +476,10 @@ void doInitStateTransferTable(void) {
   taosArrayPush(streamTaskSMTrans, &trans);
   trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__SCAN_HISTORY, TASK_EVENT_INIT_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, false, false);
   taosArrayPush(streamTaskSMTrans, &trans);
-  // trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STREAM_SCAN_HISTORY, TASK_EVENT_INIT_STREAM_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, false, false);
-  // taosArrayPush(streamTaskSMTrans, &trans);
 
   // scan-history related event
   trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true);
   taosArrayPush(streamTaskSMTrans, &trans);
-  // trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true);
-  // taosArrayPush(streamTaskSMTrans, &trans);
 
   // halt stream task, from other task status
   trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);

@@ -499,8 +489,6 @@ void doInitStateTransferTable(void) {
 
   SAttachedEventInfo info = {.status = TASK_STATUS__READY, .event = TASK_EVENT_HALT};
 
-  // trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true);
-  // taosArrayPush(streamTaskSMTrans, &trans);
   trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true);
   taosArrayPush(streamTaskSMTrans, &trans);
   trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);

@@ -519,8 +507,6 @@ void doInitStateTransferTable(void) {
   taosArrayPush(streamTaskSMTrans, &trans);
 
   info = (SAttachedEventInfo){.status = TASK_STATUS__READY, .event = TASK_EVENT_PAUSE};
-  // trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true);
-  // taosArrayPush(streamTaskSMTrans, &trans);
   trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true);
   taosArrayPush(streamTaskSMTrans, &trans);
   trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true);

@@ -554,8 +540,6 @@ void doInitStateTransferTable(void) {
   taosArrayPush(streamTaskSMTrans, &trans);
   trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
   taosArrayPush(streamTaskSMTrans, &trans);
-  // trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
-  // taosArrayPush(streamTaskSMTrans, &trans);
 
   // dropping related event
   trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);

@@ -574,7 +558,5 @@ void doInitStateTransferTable(void) {
   taosArrayPush(streamTaskSMTrans, &trans);
   trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, streamTaskSendTransSuccessMsg, NULL, NULL, true);
   taosArrayPush(streamTaskSMTrans, &trans);
-  // trans = createStateTransform(TASK_STATUS__STREAM_SCAN_HISTORY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
-  // taosArrayPush(streamTaskSMTrans, &trans);
 }
 //clang-format on
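The state-machine diff changes streamTaskGetStatus() from an out-parameter API (an ETaskStatus return plus a char** for the name) to returning a pointer to an SStreamTaskState carrying both the state and its printable name, which is what lets the call sites above collapse two calls into one. A compilable sketch of the shape of that change, where the struct layout is a stand-in and not the real TDengine definition:

```c
#include <stdio.h>

typedef enum { TASK_STATUS_UNINIT, TASK_STATUS_READY } ETaskStatus;

// state plus printable name travel together, as in the diff
typedef struct {
  ETaskStatus state;
  const char* name;
} SStreamTaskState;

typedef struct {
  SStreamTaskState current;
} SStateMachine;

typedef struct {
  SStateMachine sm;
} SStreamTask;

// old style: status returned, name via out-parameter
// new style: one pointer carrying both fields
static SStreamTaskState* taskGetStatus(SStreamTask* t) { return &t->sm.current; }

int main(void) {
  SStreamTask t = {.sm = {.current = {TASK_STATUS_READY, "ready"}}};
  SStreamTaskState* s = taskGetStatus(&t);
  printf("state=%d name=%s\n", s->state, s->name);
  return 0;
}
```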
@@ -26,6 +26,7 @@
 #define FLUSH_NUM                      4
 #define DEFAULT_MAX_STREAM_BUFFER_SIZE (128 * 1024 * 1024)
 #define MIN_NUM_OF_ROW_BUFF            10240
+#define MIN_NUM_OF_RECOVER_ROW_BUFF    128
 
 #define TASK_KEY               "streamFileState"
 #define STREAM_STATE_INFO_NAME "StreamStateCheckPoint"

@@ -660,7 +661,7 @@ int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId) {
   if (pCur == NULL) {
     return -1;
   }
-  int32_t recoverNum = TMIN(MIN_NUM_OF_ROW_BUFF, pFileState->maxRowCount);
+  int32_t recoverNum = TMIN(MIN_NUM_OF_RECOVER_ROW_BUFF, pFileState->maxRowCount);
   while (code == TSDB_CODE_SUCCESS) {
     if (pFileState->curRowCount >= recoverNum) {
       break;

@@ -694,7 +695,7 @@ int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId) {
   if (pCur == NULL) {
     return -1;
   }
-  int32_t recoverNum = TMIN(MIN_NUM_OF_ROW_BUFF, pFileState->maxRowCount);
+  int32_t recoverNum = TMIN(MIN_NUM_OF_RECOVER_ROW_BUFF, pFileState->maxRowCount);
   while (code == TSDB_CODE_SUCCESS) {
     if (pFileState->curRowCount >= recoverNum) {
       break;
@@ -22,6 +22,7 @@
 #include "syncRaftStore.h"
 #include "syncReplication.h"
 #include "syncUtil.h"
+#include "tglobal.h"
 
 static SyncIndex syncNodeGetSnapBeginIndex(SSyncNode *ths);
 

@@ -1186,7 +1187,7 @@ static int32_t syncSnapBufferSend(SSyncSnapshotSender *pSender, SyncSnapshotRsp
     pSndBuf->start = ack + 1;
   }
 
-  while (pSender->seq != SYNC_SNAPSHOT_SEQ_END && pSender->seq - pSndBuf->start < (pSndBuf->size >> 2)) {
+  while (pSender->seq != SYNC_SNAPSHOT_SEQ_END && pSender->seq - pSndBuf->start < tsSnapReplMaxWaitN) {
    if (snapshotSend(pSender) != 0) {
      code = terrno;
      goto _out;
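This replication change replaces the hard-coded in-flight cap of a quarter of the send buffer (pSndBuf->size >> 2) with the new tsSnapReplMaxWaitN global declared at the top of this commit, making snapshot flow control tunable. A toy model of the windowed send loop, with illustrative values in place of the real sender state:

```c
#include <stdio.h>

// Keep at most `maxWaitN` un-acked snapshot blocks in flight
// (maxWaitN stands in for tsSnapReplMaxWaitN).
int main(void) {
  int seq = 0, ackedStart = 0, seqEnd = 20;
  int maxWaitN = 4;  // stand-in for the new global config

  while (seq != seqEnd && seq - ackedStart < maxWaitN) {
    printf("send block %d (in flight: %d)\n", seq, seq - ackedStart + 1);
    seq++;  // the real sender stops here until acks advance ackedStart
  }
  return 0;
}
```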
@@ -48,6 +48,14 @@ int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg) {
   }
   return code;
 }
+
+int32_t tmsgSendSyncReq(const SEpSet* epSet, SRpcMsg* pMsg) {
+  int32_t code = (*defaultMsgCb.sendSyncReqFp)(epSet, pMsg);
+  if (code != 0) {
+    rpcFreeCont(pMsg->pCont);
+    pMsg->pCont = NULL;
+  }
+  return code;
+}
 
 void tmsgSendRsp(SRpcMsg* pMsg) {
 #if 1
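The new tmsgSendSyncReq() follows the same ownership convention as tmsgSendReq() above it: if the send callback fails, the wrapper frees the message payload and clears the pointer so the caller cannot double-free it. The rule, sketched with standard C only and hypothetical names:

```c
#include <stdlib.h>

typedef struct {
  void* pCont;  // message payload, owned by whoever has not given it away
} Msg;

static int transportSend(Msg* m) { (void)m; return -1; /* pretend the send failed */ }

static int sendReq(Msg* m) {
  int code = transportSend(m);
  if (code != 0) {
    free(m->pCont);   // transport did not take ownership on failure
    m->pCont = NULL;  // make any later free by the caller harmless
  }
  return code;
}

int main(void) {
  Msg m = {.pCont = malloc(16)};
  (void)sendReq(&m);
  free(m.pCont);  // safe: NULL after a failed send
  return 0;
}
```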
@@ -1907,7 +1907,12 @@ bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead) {
 static void* cliWorkThread(void* arg) {
   SCliThrd* pThrd = (SCliThrd*)arg;
   pThrd->pid = taosGetSelfPthreadId();
-  setThreadName("trans-cli-work");
+
+  char    threadName[TSDB_LABEL_LEN] = {0};
+  STrans* pInst = pThrd->pTransInst;
+  strtolower(threadName, pInst->label);
+  setThreadName(threadName);
+
   uv_run(pThrd->loop, UV_RUN_DEFAULT);
 
   tDebug("thread quit-thread:%08" PRId64, pThrd->pid);
@@ -2701,6 +2706,7 @@ int transSendRecvWithTimeout(void* shandle, SEpSet* pEpSet, STransMsg* pReq, STr
     ret = TSDB_CODE_TIMEOUT_ERROR;
   } else {
     memcpy(pRsp, pSyncMsg->pRsp, sizeof(STransMsg));
+    pSyncMsg->pRsp->pCont = NULL;
     if (pSyncMsg->hasEpSet == 1) {
       epsetAssign(pEpSet, &pSyncMsg->epSet);
       *epUpdated = 1;
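The worker-thread change derives the thread name from the lowercased transport instance label instead of the fixed "trans-cli-work", so threads of different rpc instances are distinguishable in process listings. A standalone sketch of the derivation; the buffer size is illustrative, and the real code passes the result to setThreadName() using TSDB_LABEL_LEN:

```c
#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  const char label[] = "TAOS-Cli";  // hypothetical instance label
  char threadName[16] = {0};

  // lowercase-copy the label, leaving room for the terminating NUL
  for (size_t i = 0; i + 1 < sizeof(threadName) && label[i] != '\0'; i++) {
    threadName[i] = (char)tolower((unsigned char)label[i]);
  }
  printf("thread name: %s\n", threadName);
  return 0;
}
```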
@@ -667,7 +667,7 @@ void transDestroySyncMsg(void* msg) {
   STransSyncMsg* pSyncMsg = msg;
   tsem_destroy(pSyncMsg->pSem);
   taosMemoryFree(pSyncMsg->pSem);
+  transFreeMsg(pSyncMsg->pRsp->pCont);
   taosMemoryFree(pSyncMsg->pRsp);
   taosMemoryFree(pSyncMsg);
 }
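These two transport fixes work as a pair: transSendRecvWithTimeout() hands the response payload to the caller via memcpy() and then nulls pSyncMsg->pRsp->pCont, and transDestroySyncMsg() gains a transFreeMsg() on that same pointer, which is only safe because of the null-out. A minimal model of the handoff (simplified types, not the real STransMsg):

```c
#include <stdlib.h>
#include <string.h>

typedef struct {
  void* pCont;
} Rsp;

static void destroySync(Rsp* held) {
  free(held->pCont);  // safe: NULL if ownership was transferred
}

int main(void) {
  Rsp held = {.pCont = malloc(32)};
  Rsp out;
  memcpy(&out, &held, sizeof(Rsp));  // caller now owns the payload pointer
  held.pCont = NULL;                 // drop our claim (the line added in the diff)
  destroySync(&held);                // no double free
  free(out.pCont);
  return 0;
}
```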
@@ -305,8 +305,8 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver) {
 }
 
 int32_t walSkipFetchBody(SWalReader *pRead) {
-  wDebug("vgId:%d, skip fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64
-         ", applied ver:%" PRId64", 0x%"PRIx64,
+  wDebug("vgId:%d, skip fetch body:%" PRId64 ", first:%" PRId64 ", commit:%" PRId64 ", last:%" PRId64
+         ", applied:%" PRId64 ", 0x%" PRIx64,
          pRead->pWal->cfg.vgId, pRead->pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer,
          pRead->pWal->vers.lastVer, pRead->pWal->vers.appliedVer, pRead->readerId);
 

@@ -325,11 +325,11 @@ int32_t walFetchBody(SWalReader *pRead) {
   int64_t ver = pReadHead->version;
   int32_t vgId = pRead->pWal->cfg.vgId;
   int64_t id = pRead->readerId;
+  SWalVer *pVer = &pRead->pWal->vers;
 
-  wDebug("vgId:%d, fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64
-         ", applied ver:%" PRId64 ", 0x%" PRIx64,
-         vgId, ver, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer,
-         pRead->pWal->vers.appliedVer, id);
+  wDebug("vgId:%d, fetch body:%" PRId64 ", first:%" PRId64 ", commit:%" PRId64 ", last:%" PRId64 ", applied:%" PRId64
+         ", 0x%" PRIx64,
+         vgId, ver, pVer->firstVer, pVer->commitVer, pVer->lastVer, pVer->appliedVer, id);
 
   if (pRead->capacity < pReadHead->bodyLen) {
     SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pRead->pHead, sizeof(SWalCkHead) + pReadHead->bodyLen);
@@ -34,7 +34,7 @@ class TDTestCase(TBase):
         tdLog.info(f"insert data.")
         # taosBenchmark run
         jfile = etool.curFile(__file__, "snapshot.json")
-        etool.runBenchmark(json=jfile)
+        etool.benchMark(json=jfile)
 
         tdSql.execute(f"use {self.db}")
         # set insert data information
@@ -0,0 +1,144 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import time
+import random
+
+import taos
+import frame.etool
+
+from frame.log import *
+from frame.sql import *
+from frame.cases import *
+from frame.caseBase import *
+from frame.srvCtl import *
+from frame import *
+
+
+class TDTestCase(TBase):
+    def insertData(self):
+        tdLog.info(f"insert data.")
+
+        # set insert data information
+        self.childtable_count = 10
+        self.insert_rows = 10000
+        self.timestamp_step = 1000
+
+        # taosBenchmark run
+        etool.benchMark(command = f"-d {self.db} -t {self.childtable_count} -n {self.insert_rows} -v 2 -y")
+
+    def doTaos(self):
+        tdLog.info(f"check taos command options...")
+        # help
+        rets = etool.runBinFile("taos", "--help")
+        self.checkListNotEmpty(rets)
+        # b r w s
+        sql = f"select * from {self.db}.{self.stb} limit 10"
+        rets = etool.runBinFile("taos", f'-B -r -w 100 -s "{sql}" ')
+        self.checkListNotEmpty(rets)
+        # -C
+        rets = etool.runBinFile("taos", "-C")
+        self.checkListNotEmpty(rets)
+        # -t
+        rets = etool.runBinFile("taos", "-t")
+        self.checkListNotEmpty(rets)
+        # -v
+        rets = etool.runBinFile("taos", "-V")
+        self.checkListNotEmpty(rets)
+        # -?
+        rets = etool.runBinFile("taos", "-?")
+        self.checkListNotEmpty(rets)
+
+        # TSDB_FQDN_LEN = 128
+        lname = "testhostnamelength"
+        lname.rjust(130, 'a')
+
+        # except test
+        sql = f"show vgroups;"
+        etool.exeBinFile("taos", f'-h {lname} -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-u {lname} -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-d {lname} -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-a {lname} -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-p{lname} -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-w -s "{sql}" ', wait=False)
+
+        # others
+        etool.exeBinFile("taos", f'-N 200 -l 2048 -s "{sql}" ', wait=False)
+        etool.exeBinFile("taos", f'-n server', wait=False)
+
+
+    def doTaosd(self):
+        tdLog.info(f"check taosd command options...")
+        idx = 1  # dnode1
+        cfg = sc.dnodeCfgPath(idx)
+
+        # -s
+        sdb = "./sdb.json"
+        eos.delFile(sdb)
+        etool.runBinFile("taosd", f"-s -c {cfg}")
+        self.checkFileExist(sdb)
+
+        # -C
+        etool.runBinFile("taosd", "-C")
+        # -k
+        rets = etool.runBinFile("taosd", "-C")
+        self.checkListNotEmpty(rets)
+        # -V
+        rets = etool.runBinFile("taosd", "-V")
+        self.checkListNotEmpty(rets)
+        # --help
+        rets = etool.runBinFile("taosd", "--help")
+        self.checkListNotEmpty(rets)
+
+        # except input
+        etool.exeBinFile("taosd", "-c")
+        etool.exeBinFile("taosd", "-e")
+
+        # stop taosd
+        sc.dnodeStop(idx)
+        # other
+        etool.exeBinFile("taosd", f"-dm -c {cfg}", False)
+        sc.dnodeStop(idx)
+        etool.exeBinFile("taosd", "-a http://192.168.1.10")
+
+        #exe
+        etool.exeBinFile("taosd", f"-E abc -c {cfg}", False)
+        sc.dnodeStop(idx)
+        etool.exeBinFile("taosd", f"-e def -c {cfg}", False)
+
+        # stop taosd test taos as server
+        sc.dnodeStop(idx)
+        etool.exeBinFile("taos", f'-n server', wait=False)
+
+    # run
+    def run(self):
+        tdLog.debug(f"start to excute {__file__}")
+
+        # insert data
+        self.insertData()
+
+        # do taos
+        self.doTaos()
+
+        # do action
+        self.doTaosd()
+
+
+
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
@@ -32,7 +32,7 @@ class TDTestCase(TBase):
         tdLog.info(f"insert data.")
         # taosBenchmark run
         json = etool.curFile(__file__, "mlevel_basic.json")
-        etool.runBenchmark(json=json)
+        etool.benchMark(json=json)
 
         tdSql.execute(f"use {self.db}")
         # set insert data information
@@ -54,16 +54,21 @@ class TDTestCase(TBase):
         tdLog.info(f"insert data.")
         # taosBenchmark run
         json = etool.curFile(__file__, "s3_basic.json")
-        etool.runBenchmark(json=json)
+        etool.benchMark(json=json)
 
         tdSql.execute(f"use {self.db}")
-        # set insert data information
+        # come from s3_basic.json
         self.childtable_count = 4
         self.insert_rows = 1000000
         self.timestamp_step = 1000
 
+    def createStream(self, sname):
+        sql = f"create stream {sname} fill_history 1 into stm1 as select count(*) from {self.db}.{self.stb} interval(10s);"
+        tdSql.execute(sql)
+
     def doAction(self):
         tdLog.info(f"do action.")
 
         self.flushDb()
         self.compactDb()

@@ -80,16 +85,33 @@ class TDTestCase(TBase):
             time.sleep(5)
             self.trimDb(True)
             loop += 1
+            tdLog.info(f"loop={loop} wait 5s...")
+
+    def checkStreamCorrect(self):
+        sql = f"select count(*) from {self.db}.stm1"
+        count = 0
+        for i in range(120):
+            tdSql.query(sql)
+            count = tdSql.getData(0, 0)
+            if count == 100000 or count == 100001:
+                return True
+            time.sleep(1)
+
+        tdLog.exit(f"stream count is not expect . expect = 100000 or 100001 real={count} . sql={sql}")
 
     # run
     def run(self):
         tdLog.debug(f"start to excute {__file__}")
+        self.sname = "stream1"
         if eos.isArm64Cpu():
             tdLog.success(f"{__file__} arm64 ignore executed")
         else:
             # insert data
             self.insertData()
 
+            # creat stream
+            self.createStream(self.sname)
+
             # check insert data correct
             self.checkInsertCorrect()

@@ -105,6 +127,12 @@ class TDTestCase(TBase):
             # check insert correct again
             self.checkInsertCorrect()
 
+            # check stream correct and drop stream
+            self.checkStreamCorrect()
+
+            # drop stream
+            self.dropStream(self.sname)
+
             # drop database and free s3 file
             self.dropDb()
@@ -72,6 +72,9 @@ class TBase:
     def dropDb(self, show = False):
         tdSql.execute(f"drop database {self.db}", show = show)
 
+    def dropStream(self, sname, show = False):
+        tdSql.execute(f"drop stream {sname}", show = show)
+
     def splitVGroups(self):
         vgids = self.getVGroup(self.db)
         selid = random.choice(vgids)

@@ -188,3 +191,14 @@ class TBase:
             time.sleep(interval)
 
         return False
+
+    # check file exist
+    def checkFileExist(self, pathFile):
+        if os.path.exists(pathFile) == False:
+            tdLog.exit(f"file not exist {pathFile}")
+
+    # check list not exist
+    def checkListNotEmpty(self, lists, tips=""):
+        if len(lists) == 0:
+            tdLog.exit(f"list is empty {tips}")
+
@@ -57,13 +57,16 @@ def exe(file):
 
 # execute file and return immediately
 def exeNoWait(file):
-    print("exe no wait")
+    if isWin():
+        cmd = f"mintty -h never {file}"
+    else:
+        cmd = f"nohup {file} > /dev/null 2>&1 & "
+    return exe(cmd)
 
 # run return output and error
-def run(command):
+def run(command, timeout = 10):
     process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    process.wait(3)
+    process.wait(timeout)
 
     output = process.stdout.read().decode(encoding="gbk")
     error = process.stderr.read().decode(encoding="gbk")

@@ -72,7 +75,13 @@ def run(command):
 
 
 # return list after run
-def runRetList(command):
-    lines = []
-    output,error = run(command)
+def runRetList(command, timeout=10):
+    output,error = run(command, timeout)
     return output.splitlines()
 
+#
+# file
+#
+
+def delFile(file):
+    return exe(f"rm -rf {file}")
@@ -24,7 +24,7 @@ import frame.eos
 from frame.log import *
 
 # run taosBenchmark with command or json file mode
-def runBenchmark(command = "", json = "") :
+def benchMark(command = "", json = "") :
     # get taosBenchmark path
     bmFile = frame.epath.binFile("taosBenchmark")
     if frame.eos.isWin():

@@ -44,3 +44,31 @@ def runBenchmark(command = "", json = "") :
 # get current directory file name
 def curFile(fullPath, filename):
     return os.path.dirname(fullPath) + "/" + filename
+
+
+# run build/bin file
+def runBinFile(fname, command, show=True):
+    binFile = frame.epath.binFile(fname)
+    if frame.eos.isWin():
+        binFile += ".exe"
+
+    cmd = f"{binFile} {command}"
+    if show:
+        tdLog.info(cmd)
+    return frame.eos.runRetList(cmd)
+
+# exe build/bin file
+def exeBinFile(fname, command, wait=True, show=True):
+    binFile = frame.epath.binFile(fname)
+    if frame.eos.isWin():
+        binFile += ".exe"
+
+    cmd = f"{binFile} {command}"
+    if wait:
+        if show:
+            tdLog.info("wait exe:" + cmd)
+        return frame.eos.exe(f"{binFile} {command}")
+    else:
+        if show:
+            tdLog.info("no wait exe:" + cmd)
+        return frame.eos.exeNoWait(cmd)
@@ -0,0 +1,591 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+import os.path
+import platform
+import distro
+import subprocess
+from time import sleep
+import base64
+import json
+import copy
+from fabric2 import Connection
+from shutil import which
+
+# self
+from frame.log import *
+
+
+class TDDnode:
+    def __init__(self, index=1, level=1, disk=1):
+        self.index = index
+        self.level = level
+        self.disk = disk
+        self.dataDir = []
+        self.running = 0
+        self.deployed = 0
+        self.testCluster = False
+        self.valgrind = 0
+        self.asan = False
+        self.remoteIP = ""
+        self.cfgDict = {
+            "fqdn": "localhost",
+            "monitor": "0",
+            "maxShellConns": "30000",
+            "locale": "en_US.UTF-8",
+            "charset": "UTF-8",
+            "asyncLog": "0",
+            "mDebugFlag": "143",
+            "dDebugFlag": "143",
+            "vDebugFlag": "143",
+            "tqDebugFlag": "143",
+            "cDebugFlag": "143",
+            "stDebugFlag": "143",
+            "smaDebugFlag": "143",
+            "jniDebugFlag": "143",
+            "qDebugFlag": "143",
+            "rpcDebugFlag": "143",
+            "tmrDebugFlag": "131",
+            "uDebugFlag": "135",
+            "sDebugFlag": "135",
+            "wDebugFlag": "135",
+            "numOfLogLines": "100000000",
+            "statusInterval": "1",
+            "enableQueryHb": "1",
+            "supportVnodes": "1024",
+            "telemetryReporting": "0"
+        }
+
+    def init(self, path, remoteIP = ""):
+        self.path = path
+        self.remoteIP = remoteIP
+        if (not self.remoteIP == ""):
+            try:
+                self.config = eval(self.remoteIP)
+                self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]})
+            except Exception as r:
+                print(r)
+
+    def setTestCluster(self, value):
+        self.testCluster = value
+
+    def setValgrind(self, value):
+        self.valgrind = value
+
+    def setAsan(self, value):
+        self.asan = value
+        if value:
+            selfPath = os.path.dirname(os.path.realpath(__file__))
+            if ("community" in selfPath):
+                self.execPath = os.path.abspath(self.path + "/community/tests/script/sh/exec.sh")
+            else:
+                self.execPath = os.path.abspath(self.path + "/tests/script/sh/exec.sh")
+
+    def getDataSize(self):
+        totalSize = 0
+
+        if (self.deployed == 1):
+            for dirpath, dirnames, filenames in os.walk(self.dataDir):
+                for f in filenames:
+                    fp = os.path.join(dirpath, f)
+
+                    if not os.path.islink(fp):
+                        totalSize = totalSize + os.path.getsize(fp)
+
+        return totalSize
+
+    def addExtraCfg(self, option, value):
+        self.cfgDict.update({option: value})
+
+    def remoteExec(self, updateCfgDict, execCmd):
+        valgrindStr = ''
+        if (self.valgrind==1):
+            valgrindStr = '-g'
+        remoteCfgDict = copy.deepcopy(updateCfgDict)
||||||
|
except Exception as r:
|
||||||
|
print(r)
|
||||||
|
|
||||||
|
def setTestCluster(self, value):
|
||||||
|
self.testCluster = value
|
||||||
|
|
||||||
|
def setValgrind(self, value):
|
||||||
|
self.valgrind = value
|
||||||
|
|
||||||
|
def setAsan(self, value):
|
||||||
|
self.asan = value
|
||||||
|
if value:
|
||||||
|
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
if ("community" in selfPath):
|
||||||
|
self.execPath = os.path.abspath(self.path + "/community/tests/script/sh/exec.sh")
|
||||||
|
else:
|
||||||
|
self.execPath = os.path.abspath(self.path + "/tests/script/sh/exec.sh")
|
||||||
|
|
||||||
|
def getDataSize(self):
|
||||||
|
totalSize = 0
|
||||||
|
|
||||||
|
if (self.deployed == 1):
|
||||||
|
for dirpath, dirnames, filenames in os.walk(self.dataDir):
|
||||||
|
for f in filenames:
|
||||||
|
fp = os.path.join(dirpath, f)
|
||||||
|
|
||||||
|
if not os.path.islink(fp):
|
||||||
|
totalSize = totalSize + os.path.getsize(fp)
|
||||||
|
|
||||||
|
return totalSize
|
||||||
|
|
||||||
|
def addExtraCfg(self, option, value):
|
||||||
|
self.cfgDict.update({option: value})
|
||||||
|
|
||||||
|
def remoteExec(self, updateCfgDict, execCmd):
|
||||||
|
valgrindStr = ''
|
||||||
|
if (self.valgrind==1):
|
||||||
|
valgrindStr = '-g'
|
||||||
|
remoteCfgDict = copy.deepcopy(updateCfgDict)
|
||||||
|
if ("logDir" in remoteCfgDict):
|
||||||
|
del remoteCfgDict["logDir"]
|
||||||
|
if ("dataDir" in remoteCfgDict):
|
||||||
|
del remoteCfgDict["dataDir"]
|
||||||
|
if ("cfgDir" in remoteCfgDict):
|
||||||
|
del remoteCfgDict["cfgDir"]
|
||||||
|
remoteCfgDictStr = base64.b64encode(json.dumps(remoteCfgDict).encode()).decode()
|
||||||
|
execCmdStr = base64.b64encode(execCmd.encode()).decode()
|
||||||
|
with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')):
|
||||||
|
self.remote_conn.run("python3 ./test.py %s -d %s -e %s"%(valgrindStr,remoteCfgDictStr,execCmdStr))
|
||||||
|
|
||||||
|
def deploy(self, *updatecfgDict):
|
||||||
|
# logDir
|
||||||
|
self.logDir = os.path.join(self.path,"sim","dnode%d" % self.index, "log")
|
||||||
|
# dataDir
|
||||||
|
simPath = os.path.join(self.path, "sim", "dnode%d" % self.index)
|
||||||
|
primary = 1
|
||||||
|
if self.level == 1 and self.disk == 1:
|
||||||
|
eDir = os.path.join(simPath, "data")
|
||||||
|
self.dataDir.append(eDir)
|
||||||
|
else:
|
||||||
|
for i in range(self.level):
|
||||||
|
for j in range(self.disk):
|
||||||
|
eDir = os.path.join(simPath, f"data{i}{j}")
|
||||||
|
self.dataDir.append(f"{eDir} {i} {primary}")
|
||||||
|
if primary == 1:
|
||||||
|
primary = 0
|
||||||
|
|
||||||
|
# taos.cfg
|
||||||
|
self.cfgDir = os.path.join(self.path,"sim","dnode%d" % self.index, "cfg")
|
||||||
|
self.cfgPath = os.path.join(self.cfgDir, "taos.cfg")
|
||||||
|
|
||||||
|
for eDir in self.dataDir:
|
||||||
|
cmd = "rm -rf " + eDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.logDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
cmd = "rm -rf " + self.cfgDir
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
# cmd = "mkdir -p " + self.dataDir
|
||||||
|
# if os.system(cmd) != 0:
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
for eDir in self.dataDir:
|
||||||
|
os.makedirs(eDir.split(' ')[0])
|
||||||
|
|
||||||
|
# cmd = "mkdir -p " + self.logDir
|
||||||
|
# if os.system(cmd) != 0:
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
os.makedirs(self.logDir)
|
||||||
|
|
||||||
|
# cmd = "mkdir -p " + self.cfgDir
|
||||||
|
# if os.system(cmd) != 0:
|
||||||
|
# tdLog.exit(cmd)
|
||||||
|
os.makedirs(self.cfgDir)
|
||||||
|
|
||||||
|
cmd = "touch " + self.cfgPath
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.startIP()
|
||||||
|
|
||||||
|
if self.testCluster:
|
||||||
|
self.cfg("masterIp", "192.168.0.1")
|
||||||
|
self.cfg("secondIp", "192.168.0.2")
|
||||||
|
self.cfg("publicIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("internalIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfg("privateIp", "192.168.0.%d" % (self.index))
|
||||||
|
self.cfgDict["dataDir"] = self.dataDir
|
||||||
|
self.cfgDict["logDir"] = self.logDir
|
||||||
|
# self.cfg("dataDir",self.dataDir)
|
||||||
|
# self.cfg("logDir",self.logDir)
|
||||||
|
# print(updatecfgDict)
|
||||||
|
isFirstDir = 1
|
||||||
|
if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
|
||||||
|
for key, value in updatecfgDict[0][0].items():
|
||||||
|
if key == "clientCfg" and self.remoteIP == "" and not platform.system().lower() == 'windows':
|
||||||
|
continue
|
||||||
|
if value == 'dataDir':
|
||||||
|
if isFirstDir:
|
||||||
|
self.cfgDict.pop('dataDir')
|
||||||
|
self.cfg(value, key)
|
||||||
|
isFirstDir = 0
|
||||||
|
else:
|
||||||
|
self.cfg(value, key)
|
||||||
|
else:
|
||||||
|
self.addExtraCfg(key, value)
|
||||||
|
if (self.remoteIP == ""):
|
||||||
|
for key, value in self.cfgDict.items():
|
||||||
|
if type(value) == list:
|
||||||
|
for v in value:
|
||||||
|
self.cfg(key, v)
|
||||||
|
else:
|
||||||
|
self.cfg(key, value)
|
||||||
|
else:
|
||||||
|
self.remoteExec(self.cfgDict, "tdDnodes.deploy(%d,updateCfgDict)"%self.index)
|
||||||
|
|
||||||
|
self.deployed = 1
|
||||||
|
tdLog.debug(
|
||||||
|
"dnode:%d is deployed and configured by %s" %
|
||||||
|
(self.index, self.cfgPath))
|
||||||
|
|
||||||
|
def getPath(self, tool="taosd"):
|
||||||
|
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
|
||||||
|
if ("community" in selfPath):
|
||||||
|
projPath = selfPath[:selfPath.find("community")]
|
||||||
|
else:
|
||||||
|
projPath = selfPath[:selfPath.find("tests")]
|
||||||
|
|
||||||
|
paths = []
|
||||||
|
for root, dirs, files in os.walk(projPath):
|
||||||
|
if ((tool) in files or ("%s.exe"%tool) in files):
|
||||||
|
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||||
|
if ("packaging" not in rootRealPath):
|
||||||
|
paths.append(os.path.join(root, tool))
|
||||||
|
break
|
||||||
|
if (len(paths) == 0):
|
||||||
|
return ""
|
||||||
|
return paths[0]
|
||||||
|
|
||||||
|
def starttaosd(self):
|
||||||
|
binPath = self.getPath()
|
||||||
|
|
||||||
|
if (binPath == ""):
|
||||||
|
tdLog.exit("taosd not found!")
|
||||||
|
else:
|
||||||
|
tdLog.info("taosd found: %s" % binPath)
|
||||||
|
|
||||||
|
if self.deployed == 0:
|
||||||
|
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||||
|
|
||||||
|
if self.valgrind == 0:
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
cmd = "mintty -h never %s -c %s" % (
|
||||||
|
binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
if self.asan:
|
||||||
|
asanDir = "%s/sim/asan/dnode%d.asan" % (
|
||||||
|
self.path, self.index)
|
||||||
|
cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
|
||||||
|
binPath, self.cfgDir, asanDir)
|
||||||
|
else:
|
||||||
|
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||||
|
binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
|
||||||
|
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
cmd = "mintty -h never %s %s -c %s" % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
|
||||||
|
print(cmd)
|
||||||
|
|
||||||
|
if (not self.remoteIP == ""):
|
||||||
|
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index))
|
||||||
|
self.running = 1
|
||||||
|
else:
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
self.running = 1
|
||||||
|
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||||
|
if self.valgrind == 0:
|
||||||
|
time.sleep(0.1)
|
||||||
|
key1 = 'from offline to online'
|
||||||
|
bkey1 = bytes(key1, encoding="utf8")
|
||||||
|
key2= 'TDengine initialized successfully'
|
||||||
|
bkey2 = bytes(key2, encoding="utf8")
|
||||||
|
logFile = self.logDir + "/taosdlog.0"
|
||||||
|
i = 0
|
||||||
|
# while not os.path.exists(logFile):
|
||||||
|
# sleep(0.1)
|
||||||
|
# i += 1
|
||||||
|
# if i > 10:
|
||||||
|
# break
|
||||||
|
# tailCmdStr = 'tail -f '
|
||||||
|
# if platform.system().lower() == 'windows':
|
||||||
|
# tailCmdStr = 'tail -n +0 -f '
|
||||||
|
# popen = subprocess.Popen(
|
||||||
|
# tailCmdStr + logFile,
|
||||||
|
# stdout=subprocess.PIPE,
|
||||||
|
# stderr=subprocess.PIPE,
|
||||||
|
# shell=True)
|
||||||
|
# pid = popen.pid
|
||||||
|
# # print('Popen.pid:' + str(pid))
|
||||||
|
# timeout = time.time() + 60 * 2
|
||||||
|
# while True:
|
||||||
|
# line = popen.stdout.readline().strip()
|
||||||
|
# print(line)
|
||||||
|
# if bkey1 in line:
|
||||||
|
# popen.kill()
|
||||||
|
# break
|
||||||
|
# elif bkey2 in line:
|
||||||
|
# popen.kill()
|
||||||
|
# break
|
||||||
|
# if time.time() > timeout:
|
||||||
|
# print(time.time(),timeout)
|
||||||
|
# tdLog.exit('wait too long for taosd start')
|
||||||
|
tdLog.debug("the dnode:%d has been started." % (self.index))
|
||||||
|
else:
|
||||||
|
tdLog.debug(
|
||||||
|
"wait 10 seconds for the dnode:%d to start." %
|
||||||
|
(self.index))
|
||||||
|
time.sleep(10)
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
binPath = self.getPath()
|
||||||
|
|
||||||
|
if (binPath == ""):
|
||||||
|
tdLog.exit("taosd not found!")
|
||||||
|
else:
|
||||||
|
tdLog.info("taosd found: %s" % binPath)
|
||||||
|
|
||||||
|
if self.deployed == 0:
|
||||||
|
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||||
|
|
||||||
|
if self.valgrind == 0:
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
cmd = "mintty -h never %s -c %s" % (
|
||||||
|
binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
if self.asan:
|
||||||
|
asanDir = "%s/sim/asan/dnode%d.asan" % (
|
||||||
|
self.path, self.index)
|
||||||
|
cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
|
||||||
|
binPath, self.cfgDir, asanDir)
|
||||||
|
else:
|
||||||
|
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||||
|
binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
|
||||||
|
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
cmd = "mintty -h never %s %s -c %s" % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
|
||||||
|
print(cmd)
|
||||||
|
|
||||||
|
if (not self.remoteIP == ""):
|
||||||
|
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.start(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index))
|
||||||
|
self.running = 1
|
||||||
|
else:
|
||||||
|
os.system("rm -rf %s/taosdlog.0"%self.logDir)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
self.running = 1
|
||||||
|
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||||
|
if self.valgrind == 0:
|
||||||
|
time.sleep(0.1)
|
||||||
|
key = 'from offline to online'
|
||||||
|
bkey = bytes(key, encoding="utf8")
|
||||||
|
logFile = self.logDir + "/taosdlog.0"
|
||||||
|
i = 0
|
||||||
|
while not os.path.exists(logFile):
|
||||||
|
sleep(0.1)
|
||||||
|
i += 1
|
||||||
|
if i > 50:
|
||||||
|
break
|
||||||
|
with open(logFile) as f:
|
||||||
|
timeout = time.time() + 10 * 2
|
||||||
|
while True:
|
||||||
|
line = f.readline().encode('utf-8')
|
||||||
|
if bkey in line:
|
||||||
|
break
|
||||||
|
if time.time() > timeout:
|
||||||
|
tdLog.exit('wait too long for taosd start')
|
||||||
|
tdLog.debug("the dnode:%d has been started." % (self.index))
|
||||||
|
else:
|
||||||
|
tdLog.debug(
|
||||||
|
"wait 10 seconds for the dnode:%d to start." %
|
||||||
|
(self.index))
|
||||||
|
time.sleep(10)
|
||||||
|
|
||||||
|
def startWithoutSleep(self):
|
||||||
|
binPath = self.getPath()
|
||||||
|
|
||||||
|
if (binPath == ""):
|
||||||
|
tdLog.exit("taosd not found!")
|
||||||
|
else:
|
||||||
|
tdLog.info("taosd found: %s" % binPath)
|
||||||
|
|
||||||
|
if self.deployed == 0:
|
||||||
|
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||||
|
|
||||||
|
if self.valgrind == 0:
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
cmd = "mintty -h never %s -c %s" % (binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
if self.asan:
|
||||||
|
asanDir = "%s/sim/asan/dnode%d.asan" % (
|
||||||
|
self.path, self.index)
|
||||||
|
cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
|
||||||
|
binPath, self.cfgDir, asanDir)
|
||||||
|
else:
|
||||||
|
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||||
|
binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
cmd = "mintty -h never %s %s -c %s" % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
else:
|
||||||
|
cmd = "nohup %s %s -c %s 2>&1 & " % (
|
||||||
|
valgrindCmdline, binPath, self.cfgDir)
|
||||||
|
print(cmd)
|
||||||
|
|
||||||
|
if (self.remoteIP == ""):
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
else:
|
||||||
|
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].deployed=1\ntdDnodes.dnodes[%d].logDir=\"%%s/sim/dnode%%d/log\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.dnodes[%d].cfgDir=\"%%s/sim/dnode%%d/cfg\"%%(tdDnodes.dnodes[%d].path,%d)\ntdDnodes.startWithoutSleep(%d)"%(self.index-1,self.index-1,self.index-1,self.index,self.index-1,self.index-1,self.index,self.index))
|
||||||
|
|
||||||
|
self.running = 1
|
||||||
|
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
if self.asan:
|
||||||
|
stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index)
|
||||||
|
tdLog.info("execute script: " + stopCmd)
|
||||||
|
os.system(stopCmd)
|
||||||
|
return
|
||||||
|
|
||||||
|
if (not self.remoteIP == ""):
|
||||||
|
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
|
||||||
|
tdLog.info("stop dnode%d"%self.index)
|
||||||
|
return
|
||||||
|
if self.valgrind == 0:
|
||||||
|
toBeKilled = "taosd"
|
||||||
|
else:
|
||||||
|
toBeKilled = "valgrind.bin"
|
||||||
|
|
||||||
|
if self.running != 0:
|
||||||
|
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8").strip()
|
||||||
|
|
||||||
|
onlyKillOnceWindows = 0
|
||||||
|
while(processID):
|
||||||
|
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
|
||||||
|
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
killCmd = "kill -INT %s > nul 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
onlyKillOnceWindows = 1
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8").strip()
|
||||||
|
if not platform.system().lower() == 'windows':
|
||||||
|
for port in range(6030, 6041):
|
||||||
|
fuserCmd = "fuser -k -n tcp %d > /dev/null" % port
|
||||||
|
os.system(fuserCmd)
|
||||||
|
if self.valgrind:
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||||
|
|
||||||
|
|
||||||
|
def stoptaosd(self):
|
||||||
|
tdLog.debug("start to stop taosd on dnode: %d "% (self.index))
|
||||||
|
# print(self.asan,self.running,self.remoteIP,self.valgrind)
|
||||||
|
if self.asan:
|
||||||
|
stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index)
|
||||||
|
tdLog.info("execute script: " + stopCmd)
|
||||||
|
os.system(stopCmd)
|
||||||
|
return
|
||||||
|
|
||||||
|
if (not self.remoteIP == ""):
|
||||||
|
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
|
||||||
|
tdLog.info("stop dnode%d"%self.index)
|
||||||
|
return
|
||||||
|
if self.valgrind == 0:
|
||||||
|
toBeKilled = "taosd"
|
||||||
|
else:
|
||||||
|
toBeKilled = "valgrind.bin"
|
||||||
|
|
||||||
|
if self.running != 0:
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
psCmd = "for /f %%a in ('wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId ^| xargs echo ^| awk ^'{print $2}^' ^&^& echo aa') do @(ps | grep %%a | awk '{print $1}' | xargs)" % (self.index)
|
||||||
|
else:
|
||||||
|
psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}' | xargs" % (toBeKilled,self.index)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8").strip()
|
||||||
|
|
||||||
|
onlyKillOnceWindows = 0
|
||||||
|
while(processID):
|
||||||
|
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
|
||||||
|
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||||
|
if platform.system().lower() == 'windows':
|
||||||
|
killCmd = "kill -INT %s > nul 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
onlyKillOnceWindows = 1
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8").strip()
|
||||||
|
if self.valgrind:
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||||
|
|
||||||
|
def forcestop(self):
|
||||||
|
if self.asan:
|
||||||
|
stopCmd = "%s -s stop -n dnode%d -x SIGKILL" + \
|
||||||
|
(self.execPath, self.index)
|
||||||
|
tdLog.info("execute script: " + stopCmd)
|
||||||
|
os.system(stopCmd)
|
||||||
|
return
|
||||||
|
|
||||||
|
if (not self.remoteIP == ""):
|
||||||
|
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].forcestop()"%(self.index-1,self.index-1))
|
||||||
|
return
|
||||||
|
if self.valgrind == 0:
|
||||||
|
toBeKilled = "taosd"
|
||||||
|
else:
|
||||||
|
toBeKilled = "valgrind.bin"
|
||||||
|
|
||||||
|
if self.running != 0:
|
||||||
|
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8").strip()
|
||||||
|
|
||||||
|
onlyKillOnceWindows = 0
|
||||||
|
while(processID):
|
||||||
|
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
|
||||||
|
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
|
||||||
|
os.system(killCmd)
|
||||||
|
onlyKillOnceWindows = 1
|
||||||
|
time.sleep(1)
|
||||||
|
processID = subprocess.check_output(
|
||||||
|
psCmd, shell=True).decode("utf-8").strip()
|
||||||
|
for port in range(6030, 6041):
|
||||||
|
fuserCmd = "fuser -k -n tcp %d" % port
|
||||||
|
os.system(fuserCmd)
|
||||||
|
if self.valgrind:
|
||||||
|
time.sleep(2)
|
||||||
|
|
||||||
|
self.running = 0
|
||||||
|
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
|
||||||
|
|
||||||
|
def startIP(self):
|
||||||
|
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def stopIP(self):
|
||||||
|
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
|
||||||
|
self.index, self.index)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def cfg(self, option, value):
|
||||||
|
cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
|
||||||
|
if os.system(cmd) != 0:
|
||||||
|
tdLog.exit(cmd)
|
||||||
|
|
||||||
|
def getDnodeRootDir(self, index):
|
||||||
|
dnodeRootDir = os.path.join(self.path,"sim","psim","dnode%d" % index)
|
||||||
|
return dnodeRootDir
|
||||||
|
|
||||||
|
def getDnodesRootDir(self):
|
||||||
|
dnodesRootDir = os.path.join(self.path,"sim","psim")
|
||||||
|
return dnodesRootDir
|
|
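To make the new class's life cycle concrete, a minimal driver sketch. The work path and the dnode geometry below are illustrative assumptions, not values taken from the commit:

```python
# hypothetical driver for the extracted TDDnode class
from frame.server.dnode import TDDnode

dnode = TDDnode(index=1, level=3, disk=2)       # 3 levels x 2 disks -> data00..data21
dnode.init("/root/TDinternal/community/tests")  # assumed sim work path
dnode.deploy()       # recreates sim/dnode1/{cfg,log,data*} and writes taos.cfg
dnode.start()        # launches taosd, waits for "from offline to online"
dnode.stoptaosd()    # kill -INT the taosd that belongs to this dnode
```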
@@ -22,660 +22,12 @@ import base64
import json
import copy
from fabric2 import Connection
-from frame.log import *
from shutil import which

-class TDSimClient:
-    def __init__(self, path):
-        self.testCluster = False
-        self.path = path
-    [... the remaining ~640 deleted lines are the full TDSimClient and TDDnode
-     class bodies, moved out of this file verbatim into the new
-     frame.server.simClient and frame.server.dnode modules shown in this commit ...]
+# self
+from frame.log import *
+from frame.server.dnode import *
+from frame.server.simClient import *

class TDDnodes:
    def __init__(self):
@@ -0,0 +1,116 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import os
import os.path
import platform
import distro
import subprocess
from time import sleep
import base64
import json
import copy
from fabric2 import Connection
from shutil import which

# self
from frame.log import *


class TDSimClient:
    def __init__(self, path):
        self.testCluster = False
        self.path = path
        self.cfgDict = {
            "fqdn": "localhost",
            "numOfLogLines": "100000000",
            "locale": "en_US.UTF-8",
            "charset": "UTF-8",
            "asyncLog": "0",
            "rpcDebugFlag": "135",
            "tmrDebugFlag": "131",
            "cDebugFlag": "135",
            "uDebugFlag": "135",
            "jniDebugFlag": "135",
            "qDebugFlag": "135",
            "supportVnodes": "1024",
            "enableQueryHb": "1",
            "telemetryReporting": "0",
            "tqDebugflag": "135",
            "wDebugflag": "135",
        }

    def getLogDir(self):
        self.logDir = os.path.join(self.path,"sim","psim","log")
        return self.logDir

    def getCfgDir(self):
        self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
        return self.cfgDir

    def setTestCluster(self, value):
        self.testCluster = value

    def addExtraCfg(self, option, value):
        self.cfgDict.update({option: value})

    def cfg(self, option, value):
        cmd = "echo %s %s >> %s" % (option, value, self.cfgPath)
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

    def deploy(self, *updatecfgDict):
        self.logDir = os.path.join(self.path,"sim","psim","log")
        self.cfgDir = os.path.join(self.path,"sim","psim","cfg")
        self.cfgPath = os.path.join(self.path,"sim","psim","cfg","taos.cfg")

        cmd = "rm -rf " + self.logDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        # cmd = "mkdir -p " + self.logDir
        # if os.system(cmd) != 0:
        #     tdLog.exit(cmd)
        os.makedirs(self.logDir)

        cmd = "rm -rf " + self.cfgDir
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        # cmd = "mkdir -p " + self.cfgDir
        # if os.system(cmd) != 0:
        #     tdLog.exit(cmd)
        os.makedirs(self.cfgDir)

        cmd = "touch " + self.cfgPath
        if os.system(cmd) != 0:
            tdLog.exit(cmd)

        if self.testCluster:
            self.cfg("masterIp", "192.168.0.1")
            self.cfg("secondIp", "192.168.0.2")
        self.cfg("logDir", self.logDir)

        for key, value in self.cfgDict.items():
            self.cfg(key, value)

        try:
            if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
                clientCfg = dict(updatecfgDict[0][0].get('clientCfg'))
                for key, value in clientCfg.items():
                    self.cfg(key, value)
        except Exception:
            pass

        tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
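Likewise, a brief sketch of how the extracted client-side deployer is driven; the work path and the extra config key are illustrative assumptions:

```python
# hypothetical driver for the extracted TDSimClient class
from frame.server.simClient import TDSimClient

sim = TDSimClient("/root/TDinternal/community/tests")  # assumed work path
sim.setTestCluster(False)
sim.addExtraCfg("rpcDebugFlag", "143")   # extra key merged into cfgDict
sim.deploy()                             # writes sim/psim/cfg/taos.cfg
print(sim.getCfgDir())
```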
@@ -482,6 +482,11 @@ class TDSql:
            time.sleep(1)
        pass

+    # execute many sql
+    def executes(self, sqls, queryTimes=30, show=False):
+        for sql in sqls:
+            self.execute(sql, queryTimes, show)
+
    def checkAffectedRows(self, expectAffectedRows):
        if self.affectedRows != expectAffectedRows:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
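The new batch helper simply loops execute() over a list of statements. A hedged usage sketch, assuming the usual shared tdSql instance; the database and table names are illustrative:

```python
# hypothetical use of TDSql.executes on the shared tdSql instance
tdSql.executes([
    "create database if not exists db",
    "use db",
    "create table if not exists t1 (ts timestamp, v int)",
], queryTimes=10, show=True)
```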
@@ -16,8 +16,10 @@ import os
import time
import datetime

+from frame.server.dnode import *
from frame.server.dnodes import *


class srvCtl:
    def __init__(self):
        # record server information
@@ -26,6 +28,19 @@ class srvCtl:
        self.mLevel = 0
        self.mLevelDisk = 0

+    #
+    # control server
+    #
+
+    # start
+    def dnodeStart(self, idx):
+        return tdDnodes.starttaosd(idx)
+
+    # stop
+    def dnodeStop(self, idx):
+        return tdDnodes.stoptaosd(idx)
+
    #
    # about path
    #
@@ -39,6 +54,13 @@ class srvCtl:
            files = []
        return files

+    #
+    # get dnodes information
+    #
+
+    # taos.cfg position
+    def dnodeCfgPath(self, idx):
+        return tdDnodes.dnodes[idx-1].cfgPath
+

sc = srvCtl()
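A short sketch of the new control surface from a test case; the import path of the sc singleton is an assumption:

```python
# hypothetical use of the new srvCtl dnode controls
from frame.srvCtl import sc   # import path assumed

sc.dnodeStop(1)               # stop taosd on dnode 1 via tdDnodes.stoptaosd
sc.dnodeStart(1)              # start it again via tdDnodes.starttaosd
print(sc.dnodeCfgPath(1))     # taos.cfg path of dnode 1
```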
@@ -11,6 +11,7 @@
,,y,army,./pytest.sh python3 ./test.py -f enterprise/multi-level/mlevel_basic.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f enterprise/s3/s3_basic.py -L 3 -D 1
,,y,army,./pytest.sh python3 ./test.py -f community/cluster/snapshot.py -N 3 -L 3 -D 2
+,,n,army,python3 ./test.py -f community/cmdline/fullopt.py


#
@@ -36,6 +37,7 @@
#,,n,system-test,python3 ./test.py -f 8-stream/snode_restart_with_checkpoint.py -N 4

,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tbname_vgroup.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/compact-col.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 2
@@ -9,10 +9,15 @@ else
     exit 1
 fi

+# work main path
+TDENGINE_DIR=/root/TDinternal/community
+if [ x$2 != x ];then
+    TDENGINE_DIR=$2
+fi
+
+echo "TDENGINE_DIR=$TDENGINE_DIR"
 today=`date +"%Y%m%d"`
-TDENGINE_DIR=/root/TDengine
 JDBC_DIR=/root/taos-connector-jdbc
-TAOSKEEPER_DIR=/root/taoskeeper
 TDENGINE_COVERAGE_REPORT=$TDENGINE_DIR/tests/coverage-report-$today.log

 # Color setting
@@ -24,7 +29,16 @@ NC='\033[0m'

 function buildTDengine() {
     echo "check if TDengine need build"
+
+    # pull parent code
+    cd "$TDENGINE_DIR/../"
+    echo "git pull parent code..."
+    git remote prune origin > /dev/null
+    git remote update > /dev/null
+
+    # pull tdengine code
     cd $TDENGINE_DIR
+    echo "git pull tdengine code..."
     git remote prune origin > /dev/null
     git remote update > /dev/null
     REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch`
@@ -55,14 +69,11 @@ function buildTDengine() {
     LOCAL_COMMIT=`git rev-parse --short @`

     rm -rf *
-    if [ "$branch" == "3.0" ]; then
-        echo "3.0 ============="
-        cmake -DCOVER=true -DBUILD_TEST=true -DBUILD_HTTP=false -DBUILD_TOOLS=true .. > /dev/null
-    else
-        cmake -DCOVER=true -DBUILD_TEST=true -DBUILD_TOOLS=true -DBUILD_HTTP=false .. > /dev/null
-    fi
-    make -j
-    make install
+    makecmd="cmake -DCOVER=true -DBUILD_TEST=true -DBUILD_HTTP=false -DBUILD_TOOLS=true -DBUILD_GEOS=true -DBUILD_CONTRIB=true ../../"
+    echo "$makecmd"
+    $makecmd
+
+    make -j 8 install
 }

 function runCasesOneByOne () {
@@ -72,29 +83,18 @@ function runCasesOneByOne ()
         if [[ "$2" == "sim" ]] && [[ $line == *"script"* ]]; then
             case=`echo $cmd | cut -d' ' -f 3`
             start_time=`date +%s`
-            date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
+            date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && timeout 20m $cmd > /dev/null 2>&1 && \
                 echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT \
                 || echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
             end_time=`date +%s`
             echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
-        elif [[ "$2" == "system-test" ]] && [[ $line == *"system-test"* ]]; then
+        elif [[ "$line" == *"$2"* ]]; then
             if [[ "$cmd" == *"pytest.sh"* ]]; then
                 cmd=`echo $cmd | cut -d' ' -f 2-20`
             fi
             case=`echo $cmd | cut -d' ' -f 4-20`
             start_time=`date +%s`
-            date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
-                echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
-                echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
-            end_time=`date +%s`
-            echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
-        elif [[ "$2" == "develop-test" ]] && [[ $line == *"develop-test"* ]]; then
-            if [[ "$cmd" == *"pytest.sh"* ]]; then
-                cmd=`echo $cmd | cut -d' ' -f 2-20`
-            fi
-            case=`echo $cmd | cut -d' ' -f 4-20`
-            start_time=`date +%s`
-            date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
+            date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && timeout 20m $cmd > /dev/null 2>&1 && \
                 echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
                 echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
             end_time=`date +%s`
             echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
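The key change above is wrapping every case in `timeout 20m`, so a single hung case can no longer stall the whole coverage run; the three near-identical branches also collapse into one generic `elif`. A rough Python equivalent of the timeout guard, for illustration only:

```python
import subprocess

def run_case(cmd: str, limit_s: int = 20 * 60) -> bool:
    """Run one test command; exceeding limit_s counts as a failure."""
    try:
        result = subprocess.run(cmd, shell=True, timeout=limit_s,
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)
        return result.returncode == 0
    except subprocess.TimeoutExpired:
        return False  # same outcome the shell's timeout(1) produces
```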
@@ -109,19 +109,6 @@ function runUnitTest() {
     echo " $TDENGINE_DIR/debug"
     cd $TDENGINE_DIR/debug
     ctest -j12
-
-    echo " $TDENGINE_DIR/tests/script/api"
-    cd $TDENGINE_DIR/tests/script/api
-    make clean && make
-
-    stopTaosd
-    stopTaosadapter
-
-    nohup taosd -c /etc/taos >> /dev/null 2>&1 &
-    ./batchprepare 127.0.0.1
-    ./dbTableRoute 127.0.0.1
-    ./stopquery 127.0.0.1 demo t1

     echo "3.0 unit test done"
 }
@@ -129,7 +116,7 @@ function runSimCases() {
     echo "=== Run sim cases ==="

     cd $TDENGINE_DIR/tests/script
-    runCasesOneByOne ../parallel_test/cases.task sim
+    runCasesOneByOne $TDENGINE_DIR/tests/parallel_test/cases.task sim

     totalSuccess=`grep 'sim success' $TDENGINE_COVERAGE_REPORT | wc -l`
     if [ "$totalSuccess" -gt "0" ]; then
@@ -148,9 +135,15 @@ function runPythonCases() {
     cd $TDENGINE_DIR/tests/parallel_test
     sed -i '/compatibility.py/d' cases.task

+    # army
+    cd $TDENGINE_DIR/tests/army
+    runCasesOneByOne ../parallel_test/cases.task army
+
+    # system-test
     cd $TDENGINE_DIR/tests/system-test
     runCasesOneByOne ../parallel_test/cases.task system-test

+    # develop-test
     cd $TDENGINE_DIR/tests/develop-test
     runCasesOneByOne ../parallel_test/cases.task develop-test
@@ -177,7 +170,7 @@ function runJDBCCases() {
     stopTaosd
     stopTaosadapter

-    nohup taosd -c /etc/taos >> /dev/null 2>&1 &
+    nohup $TDENGINE_DIR/debug/build/bin/taosd -c /etc/taos >> /dev/null 2>&1 &
     nohup taosadapter >> /dev/null 2>&1 &

     mvn clean test > result.txt 2>&1
@@ -185,24 +178,6 @@
     echo -e "### JDBC test result: $summary ###" | tee -a $TDENGINE_COVERAGE_REPORT
 }

-function runTaosKeeperCases() {
-    echo "=== Run taoskeeper cases ==="
-
-    cd $TAOSKEEPER_DIR
-    git checkout -- .
-    git reset --hard HEAD
-    git checkout master
-    git pull
-
-    stopTaosd
-    stopTaosadapter
-
-    taosd -c /etc/taos >> /dev/null 2>&1 &
-    taosadapter >> /dev/null 2>&1 &
-
-    go mod tidy && go test -v ./...
-}
-
 function runTest() {
     echo "run Test"
@@ -214,7 +189,6 @@ function runTest() {
     runSimCases
     runPythonCases
     runJDBCCases
-    runTaosKeeperCases

     stopTaosd
     cd $TDENGINE_DIR/tests/script
@@ -229,41 +203,34 @@ function lcovFunc {
     cd $TDENGINE_DIR

     # collect data
-    lcov -d . --capture --rc lcov_branch_coverage=1 --rc genhtml_branch_coverage=1 --no-external -b $TDENGINE_DIR -o coverage.info
+    lcov --capture -d $TDENGINE_DIR -b $TDENGINE_DIR -o coverage.info --ignore-errors negative,mismatch,inconsistent,source --branch-coverage --function-coverage --no-external

     # remove exclude paths
-    if [ "$branch" == "main" ] ; then
-        lcov --remove coverage.info \
-            '*/contrib/*' '*/tests/*' '*/test/*' '*/tools/*' '*/libs/sync/*'\
-            '*/AccessBridgeCalls.c' '*/ttszip.c' '*/dataInserter.c' '*/tlinearhash.c' '*/tsimplehash.c' '*/tsdbDiskData.c'\
-            '*/texpr.c' '*/runUdf.c' '*/schDbg.c' '*/syncIO.c' '*/tdbOs.c' '*/pushServer.c' '*/osLz4.c'\
-            '*/tbase64.c' '*/tbuffer.c' '*/tdes.c' '*/texception.c' '*/tidpool.c' '*/tmempool.c'\
-            '*/clientJniConnector.c' '*/clientTmqConnector.c' '*/version.c' '*/shellAuto.c' '*/shellTire.c'\
-            '*/tthread.c' '*/tversion.c' '*/ctgDbg.c' '*/schDbg.c' '*/qwDbg.c' '*/tencode.h' '*/catalog.c'\
-            '*/tqSnapshot.c' '*/tsdbSnapshot.c''*/metaSnapshot.c' '*/smaSnapshot.c' '*/tqOffsetSnapshot.c'\
-            '*/vnodeSnapshot.c' '*/metaSnapshot.c' '*/tsdbSnapshot.c' '*/mndGrant.c' '*/mndSnode.c' '*/streamRecover.c'\
-            '*/osAtomic.c' '*/osDir.c' '*/osFile.c' '*/osMath.c' '*/osSignal.c' '*/osSleep.c' '*/osString.c' '*/osSystem.c'\
-            '*/osThread.c' '*/osTime.c' '*/osTimezone.c' \
-            --rc lcov_branch_coverage=1 -o coverage.info
-    else
-        lcov --remove coverage.info \
-            '*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' '*/ttype.h' '*/tarithoperator.c' '*/TSDBJNIConnector.c' '*/taosdemo.c' '*/clientJniConnector.c'\
-            --rc lcov_branch_coverage=1 -o coverage.info
-    fi
+    lcov --remove coverage.info \
+        '*/contrib/*' '*/tests/*' '*/test/*' '*/packaging/*' '*/taos-tools/*' '*/taosadapter/*' '*/TSZ/*' \
+        '*/AccessBridgeCalls.c' '*/ttszip.c' '*/dataInserter.c' '*/tlinearhash.c' '*/tsimplehash.c' '*/tsdbDiskData.c'\
+        '*/texpr.c' '*/runUdf.c' '*/schDbg.c' '*/syncIO.c' '*/tdbOs.c' '*/pushServer.c' '*/osLz4.c'\
+        '*/tbase64.c' '*/tbuffer.c' '*/tdes.c' '*/texception.c' '*/examples/*' '*/tidpool.c' '*/tmempool.c'\
+        '*/clientJniConnector.c' '*/clientTmqConnector.c' '*/version.c'\
+        '*/tthread.c' '*/tversion.c' '*/ctgDbg.c' '*/schDbg.c' '*/qwDbg.c' '*/tencode.h' \
+        '*/shellAuto.c' '*/shellTire.c' '*/shellCommand.c'\
+        '*/sql.c' '*/sql.y'\
+        --branch-coverage --function-coverage -o coverage.info

     # generate result
     echo "generate result"
-    lcov -l --rc lcov_branch_coverage=1 coverage.info | tee -a $TDENGINE_COVERAGE_REPORT
+    lcov -l --branch-coverage --function-coverage coverage.info | tee -a $TDENGINE_COVERAGE_REPORT
+
+    sed -i 's/\/root\/TDengine\/sql.c/\/root\/TDengine\/source\/libs\/parser\/inc\/sql.c/g' coverage.info
+    sed -i 's/\/root\/TDengine\/sql.y/\/root\/TDengine\/source\/libs\/parser\/inc\/sql.y/g' coverage.info

     # push result to coveralls.io
     echo "push result to coveralls.io"
-    /usr/local/bin/coveralls-lcov coverage.info -t o7uY02qEAgKyJHrkxLGiCOTfL3IGQR2zm | tee -a $TDENGINE_COVERAGE_REPORT
+    /usr/local/bin/coveralls-lcov coverage.info -b $branch -t o7uY02qEAgKyJHrkxLGiCOTfL3IGQR2zm | tee -a $TDENGINE_COVERAGE_REPORT

     #/root/pxiao/checkCoverageFile.sh -s $TDENGINE_DIR/source -f $TDENGINE_COVERAGE_REPORT
     #cat /root/pxiao/fileListNoCoverage.log | tee -a $TDENGINE_COVERAGE_REPORT
     cat $TDENGINE_COVERAGE_REPORT | grep "| 0.0%" | awk -F "%" '{print $1}' | awk -F "|" '{if($2==0.0)print $1}' | tee -a $TDENGINE_COVERAGE_REPORT

 }

 function sendReport {
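The two new `sed` calls remap the generated parser sources (`sql.c`, `sql.y`) to their real location under `source/libs/parser/inc`, so their coverage records attach to files that actually exist. The same fix-up expressed in Python, as an illustrative sketch only:

```python
from pathlib import Path

info = Path("coverage.info")
text = info.read_text()
for name in ("sql.c", "sql.y"):
    # rewrite the flattened path lcov recorded to the file's true location
    text = text.replace(f"/root/TDengine/{name}",
                        f"/root/TDengine/source/libs/parser/inc/{name}")
info.write_text(text)
```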
@@ -318,7 +285,7 @@ buildTDengine
 runTest
 lcovFunc

-sendReport
+#sendReport
 stopTaosd

 date >> $WORK_DIR/cron.log
@@ -13,25 +13,13 @@ class TDTestCase:
         tdSql.init(conn.cursor(), True)

     def run(self):
-        """
-        Insert rules for timestamp input:
-        For a field of type timestamp, only the following forms are accepted:
-            timestamp
-            timestamp +/- interval
-            interval + timestamp
-        The timestamp may be a string such as "2023-12-05 00:00:00.000" or an integer such as 1701619200000.
-        Supported interval units are b, u, a, s, m, h, d, w; n and y are not supported (e.g. 1h, 2d).
-
-        Only binary expressions are supported, e.g. timestamp + 2h; longer chains such as timestamp + 2h + 1d are not.
-        """
-
         tdSql.execute("create database test_insert_timestamp PRECISION 'ns';")
         tdSql.execute("use test_insert_timestamp;")
         tdSql.execute("create stable st(ts timestamp, c1 int) tags(id int);")
         tdSql.execute("create table test_t using st tags(1);")

         expectErrInfo = "syntax error"
-        # 异常场景:timestamp + timestamp
+        # abnormal scenario: timestamp + timestamp
         tdSql.error("insert into test_t values(now + today(), 1 );", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values(now - today(), 1 );", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values(today() + now(), 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False)
@@ -40,24 +28,24 @@ class TDTestCase:
         tdSql.error("insert into test_t values('2023-11-28 00:00:00.000' + 1701111600000, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values(1701111500000 + 1701111600000, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False)

-        # 异常场景:timestamp + interval + interval
+        # abnormal scenario: timestamp + interval + interval
         tdSql.error("insert into test_t values(today() + 1d + 1s, 1);", expectErrInfo=expectErrInfo, fullMatched=False)

-        # 异常场景:interval - timestamp
+        # abnormal scenario: interval - timestamp
         tdSql.error("insert into test_t values(2h - now(), 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values(2h - today(), 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False)

-        # 异常场景:interval + interval
+        # abnormal scenario: interval + interval
         tdSql.error("insert into test_t values(2h - 1h, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values(2h + 1h, 1 ); ", expectErrInfo=expectErrInfo, fullMatched=False)

-        # 异常场景:非法interval类型n
+        # abnormal scenario: unsupported interval unit - n
         tdSql.error("insert into test_t values(today() + 2n, 7); ", expectErrInfo=expectErrInfo, fullMatched=False)

-        # 异常场景:非法interval类型y
+        # abnormal scenario: unsupported interval unit - y
         tdSql.error("insert into test_t values(today() - 2y, 8);", expectErrInfo=expectErrInfo, fullMatched=False)

-        # 异常场景:数据类型不对
+        # abnormal scenario: invalid data type
         tdSql.error("insert into test_t values('a1701619200000', 8);", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values('ss2023-12-05 00:00:00.000' + '1701619200000', 1);", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values(123456, 1);", expectErrInfo="Timestamp data out of range")
@@ -66,31 +54,31 @@ class TDTestCase:
         tdSql.error("insert into test_t values(None, 1);", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values(null, 1);", expectErrInfo=expectErrInfo, fullMatched=False)

-        # 异常场景:格式不对
+        # abnormal scenario: incorrect format
         tdSql.error("insert into test_t values('2023-122-05 00:00:00.000' + '1701619200000', 1);", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values('2023-12--05 00:00:00.000' + '1701619200000', 1);", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values('12/12/2023' + 10a, 1);", expectErrInfo=expectErrInfo, fullMatched=False)
         tdSql.error("insert into test_t values(1701619200000111, 1);", expectErrInfo="Timestamp data out of range", fullMatched=False)

-        # 正常场景:timestamp + interval
+        # normal scenario: timestamp + interval
         tdSql.execute("insert into test_t values(today() + 2b, 1);")
         tdSql.execute("insert into test_t values(1701619200000000000 + 2u, 2);")
         tdSql.execute("insert into test_t values(today + 2a, 3);")
         tdSql.execute("insert into test_t values('2023-12-05 23:59:59.999' + 2a, 4);")
         tdSql.execute("insert into test_t values(1701921599000000000 + 3a, 5);")

-        # 正常场景:timestamp - interval
+        # normal scenario: timestamp - interval
         tdSql.execute("insert into test_t values(today() - 2s, 6);")
         tdSql.execute("insert into test_t values(now() - 2m, 7);")
         tdSql.execute("insert into test_t values(today - 2h, 8);")
         tdSql.execute("insert into test_t values('2023-12-05 00:00:00.000000000' - 2a, 9);")
         tdSql.execute("insert into test_t values(1701669000000000000 - 2a, 10);")

-        # 正常场景:interval + timestamp
+        # normal scenario: interval + timestamp
         tdSql.execute("insert into test_t values(2d + now, 11);")
         tdSql.execute("insert into test_t values(2w + today, 12);")

-        # 正常场景:timestamp
+        # normal scenario: timestamp
         tdSql.execute("insert into test_t values('2023-12-05 00:00:00.000', 13);")
         tdSql.execute("insert into test_t values(1701629100000000000, 14);")
         tdSql.execute("insert into test_t values(now() + 2s, 15);")
@@ -102,7 +90,7 @@ class TDTestCase:
         tdSql.execute("insert into test_t values(1701619200000000000, -5);")
         tdSql.execute("insert into test_t values('2023-12-05 12:12:12' + 10a, 19);")

-        # 验证数据
+        # data verification
         tdSql.query(f'select ts,c1 from test_t order by c1;')
         tdSql.checkRows(22)
         tdSql.checkEqual(tdSql.queryResult[0][0], 1699977600000000000) # c1=-15
@@ -133,12 +121,10 @@ class TDTestCase:
         tdSql.execute("drop database if exists test_insert_timestamp;")

     def __convert_ts_to_date(self, ts: int) -> str:
-        # build a datetime object from the nanosecond timestamp
         dt_object = datetime.datetime.fromtimestamp(ts / 1e9)

-        # format the date string
         formatted_date = dt_object.strftime('%Y-%m-%d')
-        # print("converted date:", formatted_date)
         return formatted_date

     def __get_today_ts(self) -> int:
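`__convert_ts_to_date` divides the nanosecond timestamp by 1e9 before handing it to `fromtimestamp`, matching the database's 'ns' precision. A quick standalone check of that conversion (the printed date depends on the local timezone; UTC+8 is assumed for the test environment):

```python
import datetime

ts_ns = 1701619200000000000                     # a value used by the test above
dt = datetime.datetime.fromtimestamp(ts_ns / 1e9)
print(dt.strftime('%Y-%m-%d'))                  # '2023-12-04' in UTC+8
```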
@@ -16,7 +16,7 @@ class TDTestCase:

     def run(self):
         dbname = "db"
-        tdSql.prepare()
+        tdSql.prepare(dbname=dbname, drop=True, stt_trigger=1)

         tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
             col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
@@ -0,0 +1,60 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+import random
+
+
+class TDTestCase:
+    def init(self, conn, logSql, replicaVar=1):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def restartTaosd(self, index=1, dbname="db"):
+        tdDnodes.stop(index)
+        tdDnodes.startWithoutSleep(index)
+        tdSql.execute(f"use d")
+
+    def run(self):
+        tdSql.execute("drop database if exists d")
+        tdSql.execute("create database d")
+        tdSql.execute("use d")
+        tdSql.execute("create table st(ts timestamp, f int) tags (t int)")
+
+        for i in range(-2048, 2047):
+            ts = 1626624000000 + i
+            tdSql.execute(f"insert into ct1 using st tags(1) values({ts}, {i})")
+
+        tdSql.execute("flush database d")
+        for i in range(1638):
+            ts = 1648758398208 + i
+            tdSql.execute(f"insert into ct1 using st tags(1) values({ts}, {i})")
+        tdSql.execute("insert into ct1 using st tags(1) values(1648970742528, 1638)")
+        tdSql.execute("flush database d")
+
+        tdSql.query("select count(ts) from ct1 interval(17n, 5n)")
+        self.restartTaosd()
+        tdSql.query("select count(ts) from ct1 interval(17n, 5n)")
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
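The new case feeds two flushed batches into ct1 and runs a natural-month window (`17n` with a `5n` offset) before and after a dnode restart, so the counts must survive a reload from disk. A hedged sketch of issuing the same query interactively through the Python connector (connection parameters are assumptions):

```python
import taos  # TDengine Python connector (taospy)

conn = taos.connect(host="localhost")  # assumed local test server
result = conn.query("select count(ts) from d.ct1 interval(17n, 5n)")
for row in result.fetch_all():
    print(row)
conn.close()
```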
@@ -169,6 +169,16 @@ class TDTestCase:
         self.check_explain_res_has_row("Partition on", self.explain_sql(sql))
         self.check_explain_res_has_row("Sort", self.explain_sql(sql_hint))

+        sql = 'select count(*), c1 from meters partition by c1'
+        sql_hint = 'select /*+ sort_for_group() partition_first()*/ count(*), c1 from meters partition by c1'
+        self.check_explain_res_has_row("Sort", self.explain_sql(sql_hint))
+        sql_hint = 'select /*+ partition_first()*/ count(*), c1 from meters partition by c1'
+        self.check_explain_res_has_row("Partition on", self.explain_sql(sql_hint))
+        sql_hint = 'select /*+ partition_first() sort_for_group()*/ count(*), c1 from meters partition by c1'
+        self.check_explain_res_has_row("Partition on", self.explain_sql(sql_hint))
+        sql_hint = 'select /*+ sort_for_group() partition_first()*/ count(*), c1 from meters partition by c1'
+        self.check_explain_res_has_row("Sort", self.explain_sql(sql_hint))
+
     def add_order_by(self, sql: str, order_by: str, select_list: str = "*") -> str:
         return "select %s from (%s)t order by %s" % (select_list, sql, order_by)
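The assertions added above pin down how the conflicting SORT_FOR_GROUP and PARTITION_FIRST hints interact: when both appear, the plan follows whichever hint is listed first ("Sort" when sort_for_group() leads, "Partition on" when partition_first() leads). A condensed sketch of checking a plan by hand through the connector (host and database names are assumptions):

```python
import taos

conn = taos.connect(host="localhost", db="test")  # assumed test database
rows = conn.query("explain select /*+ partition_first() */ count(*), c1 "
                  "from meters partition by c1").fetch_all()
# the physical plan should choose partitioning before aggregation
assert any("Partition on" in str(row) for row in rows)
```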
@@ -59,10 +59,14 @@ class TDTestCase:
         ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
         tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None

+        time.sleep(1)
+
         # create stb/ctb/tb stream
         self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=ctb_subtable_value, fill_history_value=fill_history_value)
         self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=tb_subtable_value, fill_history_value=fill_history_value)

+        time.sleep(1)
+
         for i in range(self.tdCom.range_count):
             ctb_name = self.tdCom.get_long_name()
             self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name)
@@ -59,6 +59,9 @@ class TDTestCase:
         self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value)
         range_times = self.tdCom.range_count
         state_window_max = self.tdCom.dataDict['state_window_max']
+
+        time.sleep(2)
+
         for i in range(range_times):
             state_window_value = random.randint(int((i)*state_window_max/range_times), int((i+1)*state_window_max/range_times))
             for i in range(2, range_times+3):
@@ -50,6 +50,8 @@ class TDTestCase:
         # create stb/ctb/tb stream
         self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb)
+
+        time.sleep(1)

         init_num = 0
         for i in range(self.tdCom.range_count):
             if i == 0:
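The sleeps added across these stream cases give newly created streams time to deploy before data starts flowing. A hedged alternative that polls for readiness instead of sleeping a fixed interval (the predicate is whatever check fits the case, e.g. querying the stream's destination table):

```python
import time

def wait_until(predicate, timeout: float = 10.0, step: float = 0.5) -> bool:
    """Poll predicate() until it returns True or timeout seconds elapse."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return True
        time.sleep(step)
    return False
```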