Merge remote-tracking branch 'origin/fix/TD-26582' into case/TD-26408-MAIN
commit cb2a9ee005
@@ -16,38 +16,7 @@ With the TDengine Java connector, Seeq effortlessly supports querying time series
 
 Please download Seeq Server and Seeq Data Lab software installation package from the [Seeq official website](https://www.seeq.com/customer-download).
 
-### Install and start Seeq Server
+Please refer to [the Knowledge Base](https://support.seeq.com/kb/latest/cloud/) for Seeq installation and configuration.
 
-```
-tar xvzf seeq-server-xxx.tar.gz
-cd seeq-server-installer
-sudo ./install
-
-sudo seeq service enable
-sudo seeq start
-```
-
-### Install and start Seeq Data Lab Server
-
-Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).
-
-```
-tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
-sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
-sudo seeq config set Network/DataLab/Hostname localhost
-sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
-sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
-
-# If the main Seeq server is configured to listen over HTTPS
-sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
-
-# If the main Seeq server is NOT configured to listen over HTTPS
-sudo seeq config set Network/Webserver/Port <value>
-
-# On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
-sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
-sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
-```
 
 ### Install TDengine on-premise instance
@@ -14,40 +14,7 @@ Seeq is advanced analytics software for manufacturing and the Industrial Internet of Things (IIoT). Seeq supports
 
 ### How to install Seeq
 
-Download the relevant software, such as Seeq Server and Seeq Data Lab, from the [Seeq official website](https://www.seeq.com/customer-download).
+Download the relevant software, such as Seeq Server and Seeq Data Lab, from the [Seeq official website](https://www.seeq.com/customer-download). Seeq Data Lab must be installed on a server separate from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, see the [Seeq Knowledge Base](https://support.seeq.com/kb/latest/cloud/).
 
-### Install and start Seeq Server
-
-```
-tar xvzf seeq-server-xxx.tar.gz
-cd seeq-server-installer
-sudo ./install
-
-sudo seeq service enable
-sudo seeq start
-```
-
-### Install and start Seeq Data Lab Server
-
-Seeq Data Lab must be installed on a server separate from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, see the [official Seeq documentation](https://support.seeq.com/space/KB/1034059842).
-
-```
-tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
-sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
-sudo seeq config set Network/DataLab/Hostname localhost
-sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
-sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
-
-# If the main Seeq server is configured to listen over HTTPS
-sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
-
-# If the main Seeq server is NOT configured to listen over HTTPS
-sudo seeq config set Network/Webserver/Port <value>
-
-# On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
-sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
-sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
-```
 
 ## How to install a local TDengine instance
@@ -29,7 +29,7 @@
 extern "C" {
 #endif
 
-#define AUDIT_DETAIL_MAX 64000
+#define AUDIT_DETAIL_MAX 65472
 
 typedef struct {
   const char *server;
@@ -29,6 +29,21 @@ extern "C" {
 #ifndef _STREAM_H_
 #define _STREAM_H_
 
+#define ONE_MiB_F       (1048576.0)
+#define ONE_KiB_F       (1024.0)
+#define SIZE_IN_MiB(_v) ((_v) / ONE_MiB_F)
+#define SIZE_IN_KiB(_v) ((_v) / ONE_KiB_F)
+
+#define TASK_DOWNSTREAM_READY      0x0
+#define TASK_DOWNSTREAM_NOT_READY  0x1
+#define TASK_DOWNSTREAM_NOT_LEADER 0x2
+#define TASK_SELF_NEW_STAGE        0x3
+
+#define NODE_ROLE_UNINIT   0x1
+#define NODE_ROLE_LEADER   0x2
+#define NODE_ROLE_FOLLOWER 0x3
+
 typedef struct SStreamTask SStreamTask;
 
 #define SSTREAM_TASK_VER 2
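A note on the new helpers above: ONE_MiB_F and ONE_KiB_F are floating-point divisors, so SIZE_IN_MiB/SIZE_IN_KiB yield fractional sizes suitable for logging. A minimal standalone sketch (the byte count here is illustrative, not from the repo):

```
#include <stdio.h>

#define ONE_MiB_F       (1048576.0)
#define SIZE_IN_MiB(_v) ((_v) / ONE_MiB_F)

int main(void) {
  long long bytes = 5242880;  // hypothetical dispatch size in bytes
  // 5242880 / 1048576.0 -> 5.00
  printf("dispatched %.2f MiB\n", SIZE_IN_MiB(bytes));
  return 0;
}
```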
@@ -64,6 +79,7 @@ enum {
   TASK_INPUT_STATUS__NORMAL = 1,
   TASK_INPUT_STATUS__BLOCKED,
   TASK_INPUT_STATUS__FAILED,
+  TASK_INPUT_STATUS__REFUSED,
 };
 
 enum {
@@ -197,7 +213,7 @@ typedef struct {
   int32_t taskId;
   int32_t nodeId;
   SEpSet  epSet;
-} STaskDispatcherFixedEp;
+} STaskDispatcherFixed;
 
 typedef struct {
   char stbFullName[TSDB_TABLE_FNAME_LEN];
@@ -259,8 +275,8 @@ typedef struct SStreamStatus {
   int8_t  schedStatus;
   int8_t  keepTaskStatus;
   bool    appendTranstateBlock;  // has append the transfer state data block already, todo: remove it
-  int8_t  timerActive;           // timer is active
   int8_t  pauseAllowed;          // allowed task status to be set to be paused
+  int32_t timerActive;           // timer is active
 } SStreamStatus;
 
 typedef struct SDataRange {
@@ -279,11 +295,16 @@ typedef struct SSTaskBasicInfo {
   int64_t triggerParam;  // in msec
 } SSTaskBasicInfo;
 
+typedef struct SStreamDispatchReq SStreamDispatchReq;
+
 typedef struct SDispatchMsgInfo {
-  void*   pData;       // current dispatch data
+  SStreamDispatchReq* pData;  // current dispatch data
+  int8_t  dispatchMsgType;
   int16_t msgType;     // dispatch msg type
   int32_t retryCount;  // retry send data count
-  int64_t blockingTs;  // output blocking timestamp
+  int64_t startTs;     // dispatch start time, records total elapsed time for dispatch
+  SArray* pRetryList;  // downstream nodes that have completed the current dispatch successfully
+  void*   pTimer;      // used to dispatch data after a given time duration
 } SDispatchMsgInfo;
 
 typedef struct STaskOutputInfo {
@@ -302,22 +323,37 @@ typedef struct STaskSchedInfo {
   void* pTimer;
 } STaskSchedInfo;
 
-typedef struct SSinkTaskRecorder {
+typedef struct SSinkRecorder {
   int64_t numOfSubmit;
   int64_t numOfBlocks;
   int64_t numOfRows;
-} SSinkTaskRecorder;
+  int64_t dataSize;
+} SSinkRecorder;
 
-typedef struct {
+typedef struct STaskExecStatisInfo {
   int64_t created;
   int64_t init;
+  int64_t start;
   int64_t step1Start;
   int64_t step2Start;
-  int64_t start;
   int32_t updateCount;
   int64_t latestUpdateTs;
+  int32_t processDataBlocks;
+  int64_t processDataSize;
+  int32_t dispatch;
+  int64_t dispatchDataSize;
+  int32_t checkpoint;
+  SSinkRecorder sink;
 } STaskExecStatisInfo;
 
+typedef struct SHistoryTaskInfo {
+  STaskId id;
+  void*   pTimer;
+  int32_t tickCount;
+  int32_t retryTimes;
+  int32_t waitInterval;
+} SHistoryTaskInfo;
+
 typedef struct STokenBucket STokenBucket;
 typedef struct SMetaHbInfo  SMetaHbInfo;
@@ -333,25 +369,23 @@ struct SStreamTask {
   SCheckpointInfo     chkInfo;
   STaskExec           exec;
   SDataRange          dataRange;
-  STaskId             historyTaskId;
+  SHistoryTaskInfo    hTaskInfo;
   STaskId             streamTaskId;
-  STaskExecStatisInfo taskExecInfo;
+  STaskExecStatisInfo execInfo;
   SArray*             pReadyMsgList;  // SArray<SStreamChkptReadyInfo*>
   TdThreadMutex       lock;           // secure the operation of set task status and puting data into inputQ
   SArray*             pUpstreamInfoList;
 
   // output
   union {
-    STaskDispatcherFixedEp fixedEpDispatcher;
+    STaskDispatcherFixed   fixedDispatcher;
     STaskDispatcherShuffle shuffleDispatcher;
     STaskSinkTb            tbSink;
    STaskSinkSma            smaSink;
     STaskSinkFetch         fetchSink;
   };
-  SSinkTaskRecorder sinkRecorder;
-  STokenBucket*     pTokenBucket;
 
-  void*         launchTaskTimer;
+  STokenBucket* pTokenBucket;
   SMsgCb*       pMsgCb;  // msg handle
   SStreamState* pState;  // state backend
   SArray*       pRspMsgList;
@@ -371,6 +405,13 @@ struct SStreamTask {
   char reserve[256];
 };
 
+typedef struct STaskStartInfo {
+  int64_t   ts;
+  int32_t   startedAfterNodeUpdate;
+  SHashObj* pReadyTaskSet;  // tasks that are all ready for running stream processing
+  int32_t   elapsedTime;
+} STaskStartInfo;
+
 // meta
 typedef struct SStreamMeta {
   char* path;
@@ -384,8 +425,8 @@ typedef struct SStreamMeta {
   FTaskExpand* expandFunc;
   int32_t      vgId;
   int64_t      stage;
-  bool         leader;
-  int8_t       taskWillbeLaunched;
+  int32_t        role;
+  STaskStartInfo startInfo;
   SRWLatch     lock;
   int32_t      walScanCounter;
   void*        streamBackend;
@@ -430,11 +471,12 @@ typedef struct {
   int32_t taskId;
 } SStreamTaskRunReq;
 
-typedef struct {
+struct SStreamDispatchReq {
   int32_t type;
   int64_t stage;  // nodeId from upstream task
   int64_t streamId;
   int32_t taskId;
+  int32_t msgId;  // msg id to identify if the incoming msg from the same sender
   int32_t srcVgId;
   int32_t upstreamTaskId;
   int32_t upstreamChildId;
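The new msgId field lets a receiving task recognize a re-sent dispatch from the same upstream sender; how it is consumed is not shown in this hunk. The sketch below is only a plausible duplicate-drop pattern, under the assumption that ids from one sender increase monotonically (SUpstreamState and acceptDispatch are hypothetical names, not from the repo):

```
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t lastMsgId; } SUpstreamState;  // illustrative per-sender state

// Drop a dispatch whose msgId was already seen from this sender.
static bool acceptDispatch(SUpstreamState *s, int32_t msgId) {
  if (msgId <= s->lastMsgId) {
    return false;  // duplicate or stale resend: ignore
  }
  s->lastMsgId = msgId;
  return true;
}

int main(void) {
  SUpstreamState s = {.lastMsgId = 0};
  // prints "1 0 1": first msg accepted, resend dropped, next msg accepted
  printf("%d %d %d\n", acceptDispatch(&s, 1), acceptDispatch(&s, 1), acceptDispatch(&s, 2));
  return 0;
}
```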
@@ -443,7 +485,7 @@ typedef struct {
   int64_t totalLen;
   SArray* dataLen;  // SArray<int32_t>
   SArray* data;     // SArray<SRetrieveTableRsp*>
-} SStreamDispatchReq;
+};
 
 typedef struct {
   int64_t streamId;
@@ -451,7 +493,9 @@ typedef struct {
   int32_t upstreamTaskId;
   int32_t downstreamNodeId;
   int32_t downstreamTaskId;
+  int32_t msgId;
   int8_t  inputStatus;
+  int64_t stage;
 } SStreamDispatchRsp;
 
 typedef struct {
@@ -556,6 +600,8 @@ int32_t tDecodeStreamCheckpointReadyMsg(SDecoder* pDecoder, SStreamCheckpointReadyMsg
 typedef struct STaskStatusEntry {
   STaskId id;
   int32_t status;
+  int32_t stage;
+  int32_t nodeId;
 } STaskStatusEntry;
 
 typedef struct SStreamHbMsg {
@@ -622,7 +668,7 @@ void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
 int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
 
 int32_t streamProcessRunReq(SStreamTask* pTask);
-int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
+int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg);
 int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
 
 int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
@@ -666,7 +712,6 @@ int32_t streamSetStatusUnint(SStreamTask* pTask);
 const char* streamGetTaskStatusStr(int32_t status);
 void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
 void streamTaskResume(SStreamTask* pTask, SStreamMeta* pMeta);
-void streamTaskHalt(SStreamTask* pTask);
 void streamTaskResumeFromHalt(SStreamTask* pTask);
 void streamTaskDisablePause(SStreamTask* pTask);
 void streamTaskEnablePause(SStreamTask* pTask);
@@ -708,6 +753,7 @@ int32_t streamMetaCommit(SStreamMeta* pMeta);
 int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
 void    streamMetaNotifyClose(SStreamMeta* pMeta);
 void    streamMetaStartHb(SStreamMeta* pMeta);
+void    streamMetaInitForSnode(SStreamMeta* pMeta);
 
 // checkpoint
 int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
@@ -165,6 +165,13 @@ static FORCE_INLINE int32_t tarray2SortInsert(void *arr, const void *elePtr, int
 #define TARRAY2_FOREACH_PTR_REVERSE(a, ep) \
   for (int32_t __i = (a)->size - 1; __i >= 0 && ((ep) = &(a)->data[__i], 1); __i--)
 
+#define TARRAY2_SORT(a, cmp)                                                      \
+  do {                                                                            \
+    if ((a)->size > 1) {                                                          \
+      taosSort((a)->data, (a)->size, sizeof((a)->data[0]), (__compar_fn_t)cmp);   \
+    }                                                                             \
+  } while (0)
+
 #ifdef __cplusplus
 }
 #endif
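TARRAY2_SORT forwards to taosSort, which follows the qsort contract (base pointer, element count, element size, comparator). A standalone sketch of the same macro shape using the C standard library's qsort; the SIntArray container and comparator are illustrative, not from the repo:

```
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Illustrative stand-in for a TARRAY2-style container: a sized data buffer.
typedef struct { int32_t size; int data[8]; } SIntArray;

static int cmpInt(const void *a, const void *b) {
  return (*(const int *)a > *(const int *)b) - (*(const int *)a < *(const int *)b);
}

// Same shape as TARRAY2_SORT, with qsort standing in for taosSort.
#define ARRAY_SORT(a, cmp)                                     \
  do {                                                         \
    if ((a)->size > 1) {                                       \
      qsort((a)->data, (a)->size, sizeof((a)->data[0]), cmp);  \
    }                                                          \
  } while (0)

int main(void) {
  SIntArray arr = {.size = 4, .data = {3, 1, 2, 0}};
  ARRAY_SORT(&arr, cmpInt);
  for (int32_t i = 0; i < arr.size; i++) printf("%d ", arr.data[i]);  // 0 1 2 3
  printf("\n");
  return 0;
}
```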
@@ -55,6 +55,7 @@ extern int32_t tmrDebugFlag;
 extern int32_t uDebugFlag;
 extern int32_t rpcDebugFlag;
 extern int32_t qDebugFlag;
+extern int32_t stDebugFlag;
 extern int32_t wDebugFlag;
 extern int32_t sDebugFlag;
 extern int32_t tsdbDebugFlag;
@@ -33,14 +33,17 @@ adapterName="taosadapter"
 benchmarkName="taosBenchmark"
 dumpName="taosdump"
 demoName="taosdemo"
+xname="taosx"
 
 clientName2="taos"
 serverName2="${clientName2}d"
 configFile2="${clientName2}.cfg"
 productName2="TDengine"
 emailName2="taosdata.com"
+xname2="${clientName2}x"
 adapterName2="${clientName2}adapter"
 
+explorerName="${clientName2}-explorer"
 benchmarkName2="${clientName2}Benchmark"
 demoName2="${clientName2}demo"
 dumpName2="${clientName2}dump"
@@ -235,6 +238,12 @@ function install_bin() {
   [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
 
   if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
+    ${csudo}rm -f ${bin_link_dir}/${xname2} || :
+    ${csudo}rm -f ${bin_link_dir}/${explorerName} || :
+
+    #Make link
+    [ -x ${install_main_dir}/bin/${xname2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${xname2} ${bin_link_dir}/${xname2} || :
+    [ -x ${install_main_dir}/bin/${explorerName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${explorerName} ${bin_link_dir}/${explorerName} || :
     [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript2} || :
   fi
 }
@@ -693,9 +702,29 @@ function clean_service_on_systemd() {
   fi
   ${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
   ${csudo}rm -f ${tarbitratord_service_config}
-  # if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
-  #   ${csudo}rm -f ${service_config_dir}/${serverName2}.service
-  # fi
+
+  if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
+    x_service_config="${service_config_dir}/${xName2}.service"
+    if [ -e "$x_service_config" ]; then
+      if systemctl is-active --quiet ${xName2}; then
+        echo "${productName2} ${xName2} is running, stopping it..."
+        ${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
+      fi
+      ${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
+      ${csudo}rm -f ${x_service_config}
+    fi
+
+    explorer_service_config="${service_config_dir}/${explorerName2}.service"
+    if [ -e "$explorer_service_config" ]; then
+      if systemctl is-active --quiet ${explorerName2}; then
+        echo "${productName2} ${explorerName2} is running, stopping it..."
+        ${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
+      fi
+      ${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
+      ${csudo}rm -f ${explorer_service_config}
+      ${csudo}rm -f /etc/${clientName2}/explorer.toml
+    fi
+  fi
 }
 
 function install_service_on_systemd() {
@@ -123,10 +123,11 @@ function clean_bin() {
   ${csudo}rm -f ${bin_link_dir}/set_core || :
   ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
   ${csudo}rm -f ${bin_link_dir}/${keeperName2} || :
-  # ${csudo}rm -f ${bin_link_dir}/${xName2} || :
-  # ${csudo}rm -f ${bin_link_dir}/${explorerName2} || :
 
   if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
+    ${csudo}rm -f ${bin_link_dir}/${xName2} || :
+    ${csudo}rm -f ${bin_link_dir}/${explorerName2} || :
     ${csudo}rm -f ${bin_link_dir}/${clientName2} || :
     ${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || :
     ${csudo}rm -f ${bin_link_dir}/${dumpName2} || :
@@ -195,26 +196,28 @@ function clean_service_on_systemd() {
   fi
   ${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null
 
-  # x_service_config="${service_config_dir}/${xName2}.service"
-  # if [ -e "$x_service_config" ]; then
-  #   if systemctl is-active --quiet ${xName2}; then
-  #     echo "${productName2} ${xName2} is running, stopping it..."
-  #     ${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
-  #   fi
-  #   ${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
-  #   ${csudo}rm -f ${x_service_config}
-  # fi
+  if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
+    x_service_config="${service_config_dir}/${xName2}.service"
+    if [ -e "$x_service_config" ]; then
+      if systemctl is-active --quiet ${xName2}; then
+        echo "${productName2} ${xName2} is running, stopping it..."
+        ${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
+      fi
+      ${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
+      ${csudo}rm -f ${x_service_config}
+    fi
 
-  # explorer_service_config="${service_config_dir}/${explorerName2}.service"
-  # if [ -e "$explorer_service_config" ]; then
-  #   if systemctl is-active --quiet ${explorerName2}; then
-  #     echo "${productName2} ${explorerName2} is running, stopping it..."
-  #     ${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
-  #   fi
-  #   ${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
-  #   ${csudo}rm -f ${explorer_service_config}
-  #   ${csudo}rm -f /etc/${clientName2}/explorer.toml
-  # fi
+    explorer_service_config="${service_config_dir}/${explorerName2}.service"
+    if [ -e "$explorer_service_config" ]; then
+      if systemctl is-active --quiet ${explorerName2}; then
+        echo "${productName2} ${explorerName2} is running, stopping it..."
+        ${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
+      fi
+      ${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
+      ${csudo}rm -f ${explorer_service_config}
+      ${csudo}rm -f /etc/${clientName2}/explorer.toml
+    fi
+  fi
 }
 
 function clean_service_on_sysvinit() {
@@ -832,7 +832,7 @@ TEST(clientCase, projection_query_tables) {
   for(int32_t i = 0; i < 1000000; ++i) {
     char t[512] = {0};
 
-    sprintf(t, "insert into t1 values(%ld, %ld)", start + i, i);
+    sprintf(t, "insert into t1 values(now, %ld)", i);
     while(1) {
       void* p = taos_query(pConn, t);
       code = taos_errno(p);
@@ -158,11 +158,12 @@ static const SSysDbTableSchema streamSchema[] = {
 
 static const SSysDbTableSchema streamTaskSchema[] = {
     {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
-    {.name = "task_id", .bytes = 32, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
-    {.name = "node_type", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+    {.name = "task_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+    {.name = "node_type", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
     {.name = "node_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
     {.name = "level", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
     {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+    {.name = "stage", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
 };
 
 static const SSysDbTableSchema userTblsSchema[] = {
@@ -241,7 +241,7 @@ int32_t tsTtlBatchDropNum = 10000;  // number of tables dropped per batch
 int32_t tsTransPullupInterval = 2;
 int32_t tsMqRebalanceInterval = 2;
 int32_t tsStreamCheckpointTickInterval = 600;
-int32_t tsStreamNodeCheckInterval = 10;
+int32_t tsStreamNodeCheckInterval = 30;
 int32_t tsTtlUnit = 86400;
 int32_t tsTtlPushIntervalSec = 10;
 int32_t tsTrimVDbIntervalSec = 60 * 60;  // interval of trimming db in all vgroups
@@ -403,6 +403,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
   if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, CFG_SCOPE_SERVER) != 0) return -1;
   if (cfgAddInt32(pCfg, "tdbDebugFlag", tdbDebugFlag, 0, 255, CFG_SCOPE_SERVER) != 0) return -1;
   if (cfgAddInt32(pCfg, "metaDebugFlag", metaDebugFlag, 0, 255, 0) != CFG_SCOPE_SERVER) return -1;
+  if (cfgAddInt32(pCfg, "stDebugFlag", stDebugFlag, 0, 255, CFG_SCOPE_SERVER) != 0) return -1;
   return 0;
 }
@@ -844,6 +845,7 @@ static void taosSetServerLogCfg(SConfig *pCfg) {
   idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32;
   tdbDebugFlag = cfgGetItem(pCfg, "tdbDebugFlag")->i32;
   metaDebugFlag = cfgGetItem(pCfg, "metaDebugFlag")->i32;
+  stDebugFlag = cfgGetItem(pCfg, "stDebugFlag")->i32;
 }
 
 static int32_t taosSetSlowLogScope(char *pScope) {
@@ -1671,12 +1673,12 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
   const char *options[] = {
       "dDebugFlag",   "vDebugFlag",   "mDebugFlag",   "wDebugFlag",    "sDebugFlag",   "tsdbDebugFlag", "tqDebugFlag",
       "fsDebugFlag",  "udfDebugFlag", "smaDebugFlag", "idxDebugFlag",  "tdbDebugFlag", "tmrDebugFlag",  "uDebugFlag",
-      "smaDebugFlag", "rpcDebugFlag", "qDebugFlag",   "metaDebugFlag", "jniDebugFlag",
+      "smaDebugFlag", "rpcDebugFlag", "qDebugFlag",   "metaDebugFlag", "jniDebugFlag", "stDebugFlag",
   };
   int32_t *optionVars[] = {
       &dDebugFlag,   &vDebugFlag,   &mDebugFlag,   &wDebugFlag,    &sDebugFlag,   &tsdbDebugFlag, &tqDebugFlag,
       &fsDebugFlag,  &udfDebugFlag, &smaDebugFlag, &idxDebugFlag,  &tdbDebugFlag, &tmrDebugFlag,  &uDebugFlag,
-      &smaDebugFlag, &rpcDebugFlag, &qDebugFlag,   &metaDebugFlag, &jniDebugFlag,
+      &smaDebugFlag, &rpcDebugFlag, &qDebugFlag,   &metaDebugFlag, &jniDebugFlag, &stDebugFlag,
   };
 
   int32_t optionSize = tListLen(options);
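options[] and optionVars[] are positional pairs: the flag name at index i maps to the variable pointed to by optionVars[i], which is why "stDebugFlag" and &stDebugFlag must be appended at the same position in both tables. A minimal standalone sketch of that name-to-variable lookup (abbreviated tables; setDebugFlag is a hypothetical stand-in, not the actual taosd code):

```
#include <stdint.h>
#include <stdio.h>
#include <strings.h>

static int32_t dDebugFlag = 131, qDebugFlag = 131, stDebugFlag = 131;

// Positional pairing: options[i] names the variable pointed to by optionVars[i].
static const char *options[]    = {"dDebugFlag", "qDebugFlag", "stDebugFlag"};
static int32_t    *optionVars[] = {&dDebugFlag,  &qDebugFlag,  &stDebugFlag};

static void setDebugFlag(const char *name, int32_t value) {
  for (size_t i = 0; i < sizeof(options) / sizeof(options[0]); ++i) {
    if (strcasecmp(options[i], name) == 0) {
      *optionVars[i] = value;  // update the paired variable
      return;
    }
  }
}

int main(void) {
  setDebugFlag("stDebugFlag", 143);
  printf("stDebugFlag=%d\n", stDebugFlag);  // stDebugFlag=143
  return 0;
}
```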
@@ -1729,6 +1731,7 @@ void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
   taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite);
   taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite);
   taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
+  taosSetDebugFlag(&stDebugFlag, "stDebugFlag", flag, rewrite);
   uInfo("all debug flag are set to %d", flag);
 }
@@ -1086,7 +1086,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
   const char *options[] = {
       "debugFlag",   "dDebugFlag",   "vDebugFlag",   "mDebugFlag",   "wDebugFlag",   "sDebugFlag",   "tsdbDebugFlag",
       "tqDebugFlag", "fsDebugFlag",  "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag",
-      "uDebugFlag",  "smaDebugFlag", "rpcDebugFlag", "qDebugFlag",   "metaDebugFlag",
+      "uDebugFlag",  "smaDebugFlag", "rpcDebugFlag", "qDebugFlag",   "metaDebugFlag", "stDebugFlag",
   };
   int32_t optionSize = tListLen(options);
@@ -296,8 +296,8 @@ static void setHTasksId(SArray* pTaskList, const SArray* pHTaskList) {
     SStreamTask** pStreamTask = taosArrayGet(pTaskList, i);
     SStreamTask** pHTask = taosArrayGet(pHTaskList, i);
 
-    (*pStreamTask)->historyTaskId.taskId = (*pHTask)->id.taskId;
-    (*pStreamTask)->historyTaskId.streamId = (*pHTask)->id.streamId;
+    (*pStreamTask)->hTaskInfo.id.taskId = (*pHTask)->id.taskId;
+    (*pStreamTask)->hTaskInfo.id.streamId = (*pHTask)->id.streamId;
 
     (*pHTask)->streamTaskId.taskId = (*pStreamTask)->id.taskId;
     (*pHTask)->streamTaskId.streamId = (*pStreamTask)->id.streamId;
@@ -2265,7 +2265,7 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) {
   SName name = {0};
   tNameFromString(&name, alterReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
 
-  auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, alterReq.name, alterReq.sql, alterReq.sqlLen);
+  auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, name.tname, alterReq.sql, alterReq.sqlLen);
 
 _OVER:
   if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@@ -37,17 +37,18 @@
 
 typedef struct SNodeEntry {
   int32_t nodeId;
+  bool    stageUpdated;  // the stage has been updated due to the leader/follower change or node reboot.
   SEpSet  epset;         // compare the epset to identify the vgroup tranferring between different dnodes.
   int64_t hbTimestamp;   // second
 } SNodeEntry;
 
-typedef struct SStreamVnodeRevertIndex {
+typedef struct SStreamExecNodeInfo {
   SArray       *pNodeEntryList;
   int64_t       ts;  // snapshot ts
   SHashObj     *pTaskMap;
   SArray       *pTaskList;
   TdThreadMutex lock;
-} SStreamVnodeRevertIndex;
+} SStreamExecNodeInfo;
 
 typedef struct SVgroupChangeInfo {
   SHashObj *pDBMap;
@@ -55,7 +56,7 @@ typedef struct SVgroupChangeInfo {
 } SVgroupChangeInfo;
 
 static int32_t mndNodeCheckSentinel = 0;
-static SStreamVnodeRevertIndex execNodeList;
+static SStreamExecNodeInfo execNodeList;
 
 static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream);
 static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream);
@@ -75,7 +76,6 @@ static int32_t mndBuildStreamCheckpointSourceReq2(void **pBuf, int32_t *pLen, in
                                                   int64_t streamId, int32_t taskId);
 static int32_t mndProcessNodeCheck(SRpcMsg *pReq);
 static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg);
-static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamVnodeRevertIndex *pExecNode);
 
 static SArray *doExtractNodeListFromStream(SMnode *pMnode);
 static SArray *mndTakeVgroupSnapshot(SMnode *pMnode);
@@ -83,8 +83,8 @@ static SVgroupChangeInfo mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pP
 static int32_t mndPersistTransLog(SStreamObj *pStream, STrans *pTrans);
 static void initTransAction(STransAction *pAction, void *pCont, int32_t contLen, int32_t msgType, const SEpSet *pEpset);
 
-static void removeStreamTasksInBuf(SStreamObj* pStream, SStreamVnodeRevertIndex* pExecNode);
-static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamVnodeRevertIndex *pExecNode);
+static void removeStreamTasksInBuf(SStreamObj* pStream, SStreamExecNodeInfo* pExecNode);
+static void keepStreamTasksInBuf(SStreamObj *pStream, SStreamExecNodeInfo *pExecNode);
 
 int32_t mndInitStream(SMnode *pMnode) {
   SSdbTable table = {
@@ -1151,12 +1151,19 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
   }
 
   if (taosArrayGetSize(execNodeList.pNodeEntryList) == 0) {
-    mDebug("end to do stream task node change checking, no vgroup exists, do nothing");
+    mDebug("stream task node change checking done, no vgroups exist, do nothing");
     execNodeList.ts = ts;
-    atomic_store_32(&mndNodeCheckSentinel, 0);
     return 0;
   }
 
+  for(int32_t i = 0; i < taosArrayGetSize(execNodeList.pNodeEntryList); ++i) {
+    SNodeEntry* pNodeEntry = taosArrayGet(execNodeList.pNodeEntryList, i);
+    if (pNodeEntry->stageUpdated) {
+      mDebug("stream task not ready due to node update detected, checkpoint not issued");
+      return 0;
+    }
+  }
+
   SArray *pNodeSnapshot = mndTakeVgroupSnapshot(pMnode);
 
   SVgroupChangeInfo changeInfo = mndFindChangedNodeInfo(pMnode, execNodeList.pNodeEntryList, pNodeSnapshot);
@@ -1166,7 +1173,7 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
     taosArrayDestroy(pNodeSnapshot);
 
     if (nodeUpdated) {
-      mDebug("stream task not ready due to node update, not generate checkpoint");
+      mDebug("stream task not ready due to node update, checkpoint not issued");
       return 0;
     }
   }
@@ -1183,7 +1190,7 @@ static int32_t mndProcessStreamDoCheckpoint(SRpcMsg *pReq) {
     }
 
     if (pEntry->status != TASK_STATUS__NORMAL) {
-      mDebug("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s not ready, create checkpoint msg not issued",
+      mDebug("s-task:0x%" PRIx64 "-0x%x (nodeId:%d) status:%s not ready, checkpoint msg not issued",
              pEntry->id.streamId, (int32_t)pEntry->id.taskId, 0, streamGetTaskStatusStr(pEntry->status));
       ready = false;
       break;
@@ -1574,6 +1581,9 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
       pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
       colDataSetVal(pColInfo, numOfRows, (const char *)&status, false);
 
+      pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+      colDataSetVal(pColInfo, numOfRows, (const char *)&pe->stage, false);
+
       numOfRows++;
     }
   }
@@ -2029,7 +2039,7 @@ static SVgroupChangeInfo mndFindChangedNodeInfo(SMnode *pMnode, const SArray *pP
       SNodeEntry *pCurrent = taosArrayGet(pNodeList, j);
 
       if (pCurrent->nodeId == pPrevEntry->nodeId) {
-        if (isNodeEpsetChanged(&pPrevEntry->epset, &pCurrent->epset)) {
+        if (pPrevEntry->stageUpdated || isNodeEpsetChanged(&pPrevEntry->epset, &pCurrent->epset)) {
           const SEp *pPrevEp = GET_ACTIVE_EP(&pPrevEntry->epset);
 
           char buf[256] = {0};
@@ -2173,6 +2183,74 @@ static void doExtractTasksFromStream(SMnode *pMnode) {
   }
 }
 
+static int32_t doRemoveFromTask(SStreamExecNodeInfo* pExecNode, STaskId* pRemovedId) {
+  void *p = taosHashGet(pExecNode->pTaskMap, pRemovedId, sizeof(*pRemovedId));
+
+  if (p != NULL) {
+    taosHashRemove(pExecNode->pTaskMap, pRemovedId, sizeof(*pRemovedId));
+
+    for(int32_t k = 0; k < taosArrayGetSize(pExecNode->pTaskList); ++k) {
+      STaskId* pId = taosArrayGet(pExecNode->pTaskList, k);
+      if (pId->taskId == pRemovedId->taskId && pId->streamId == pRemovedId->streamId) {
+        taosArrayRemove(pExecNode->pTaskList, k);
+        mInfo("s-task:0x%x removed from buffer, remain:%d", (int32_t) pRemovedId->taskId,
+              (int32_t)taosArrayGetSize(pExecNode->pTaskList));
+        break;
+      }
+    }
+  }
+  return 0;
+}
+
+static int32_t removeInvalidStreamTask(SArray *pNodeSnapshot) {
+  SArray* pRemoveTaskList = taosArrayInit(4, sizeof(STaskId));
+
+  int32_t numOfTask = taosArrayGetSize(execNodeList.pTaskList);
+  int32_t numOfVgroups = taosArrayGetSize(pNodeSnapshot);
+  for(int32_t i = 0; i < numOfTask; ++i) {
+    STaskId* pId = taosArrayGet(execNodeList.pTaskList, i);
+    STaskStatusEntry* pEntry = taosHashGet(execNodeList.pTaskMap, pId, sizeof(*pId));
+
+    bool existed = false;
+    for(int32_t j = 0; j < numOfVgroups; ++j) {
+      SNodeEntry* pNodeEntry = taosArrayGet(pNodeSnapshot, j);
+      if (pNodeEntry->nodeId == pEntry->nodeId) {
+        existed = true;
+        break;
+      }
+    }
+
+    if (!existed) {
+      taosArrayPush(pRemoveTaskList, pId);
+    }
+  }
+
+  for(int32_t i = 0; i < taosArrayGetSize(pRemoveTaskList); ++i) {
+    STaskId* pId = taosArrayGet(pRemoveTaskList, i);
+    doRemoveFromTask(&execNodeList, pId);
+  }
+
+  int32_t size = taosArrayGetSize(pNodeSnapshot);
+  SArray* pValidNodeEntryList = taosArrayInit(4, sizeof(SNodeEntry));
+  for(int32_t i = 0; i < taosArrayGetSize(execNodeList.pNodeEntryList); ++i) {
+    SNodeEntry* pExisted = taosArrayGet(execNodeList.pNodeEntryList, i);
+
+    for(int32_t j = 0; j < size; ++j) {
+      SNodeEntry* pEntry = taosArrayGet(pNodeSnapshot, j);
+      if (pEntry->nodeId == pExisted->nodeId) {
+        taosArrayPush(pValidNodeEntryList, pExisted);
+        break;
+      }
+    }
+  }
+
+  execNodeList.pNodeEntryList = taosArrayDestroy(execNodeList.pNodeEntryList);
+  execNodeList.pNodeEntryList = pValidNodeEntryList;
+
+  taosArrayDestroy(pRemoveTaskList);
+  return 0;
+}
+
 // this function runs by only one thread, so it is not multi-thread safe
 static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
   int32_t code = 0;
@@ -2203,6 +2281,9 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
 
   SArray *pNodeSnapshot = mndTakeVgroupSnapshot(pMnode);
 
+  taosThreadMutexLock(&execNodeList.lock);
+  removeInvalidStreamTask(pNodeSnapshot);
+
   SVgroupChangeInfo changeInfo = mndFindChangedNodeInfo(pMnode, execNodeList.pNodeEntryList, pNodeSnapshot);
   if (taosArrayGetSize(changeInfo.pUpdateNodeList) > 0) {
     code = mndProcessVgroupChange(pMnode, &changeInfo);
@@ -2219,6 +2300,7 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
     taosArrayDestroy(pNodeSnapshot);
   }
 
+  taosThreadMutexUnlock(&execNodeList.lock);
   taosArrayDestroy(changeInfo.pUpdateNodeList);
   taosHashCleanup(changeInfo.pDBMap);
@@ -2245,7 +2327,7 @@ static int32_t mndProcessNodeCheck(SRpcMsg *pReq) {
   return 0;
 }
 
-void keepStreamTasksInBuf(SStreamObj *pStream, SStreamVnodeRevertIndex *pExecNode) {
+void keepStreamTasksInBuf(SStreamObj *pStream, SStreamExecNodeInfo *pExecNode) {
   int32_t level = taosArrayGetSize(pStream->tasks);
   for (int32_t i = 0; i < level; i++) {
     SArray *pLevel = taosArrayGetP(pStream->tasks, i);
@@ -2257,16 +2339,21 @@ void keepStreamTasksInBuf(SStreamObj *pStream, SStreamVnodeRevertIndex *pExecNode
       STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId};
       void *p = taosHashGet(pExecNode->pTaskMap, &id, sizeof(id));
       if (p == NULL) {
-        STaskStatusEntry entry = {
-            .id.streamId = pTask->id.streamId, .id.taskId = pTask->id.taskId, .status = TASK_STATUS__STOP};
+        STaskStatusEntry entry = {.id.streamId = pTask->id.streamId,
+                                  .id.taskId = pTask->id.taskId,
+                                  .stage = -1,
+                                  .nodeId = pTask->info.nodeId,
+                                  .status = TASK_STATUS__STOP};
         taosHashPut(pExecNode->pTaskMap, &id, sizeof(id), &entry, sizeof(entry));
         taosArrayPush(pExecNode->pTaskList, &id);
+        mInfo("s-task:0x%x add into task buffer, total:%d", (int32_t)entry.id.taskId,
+              (int32_t)taosArrayGetSize(pExecNode->pTaskList));
       }
     }
   }
 }
 
-void removeStreamTasksInBuf(SStreamObj* pStream, SStreamVnodeRevertIndex* pExecNode) {
+void removeStreamTasksInBuf(SStreamObj* pStream, SStreamExecNodeInfo* pExecNode) {
   int32_t level = taosArrayGetSize(pStream->tasks);
   for (int32_t i = 0; i < level; i++) {
     SArray *pLevel = taosArrayGetP(pStream->tasks, i);
@@ -2284,6 +2371,8 @@ void removeStreamTasksInBuf(SStreamObj* pStream, SStreamVnodeRevertIndex* pExecNode
         STaskId* pId = taosArrayGet(pExecNode->pTaskList, k);
         if (pId->taskId == id.taskId && pId->streamId == id.streamId) {
           taosArrayRemove(pExecNode->pTaskList, k);
+          mInfo("s-task:0x%x removed from buffer, remain:%d", (int32_t)id.taskId,
+                (int32_t)taosArrayGetSize(pExecNode->pTaskList));
           break;
         }
       }
@@ -2299,7 +2388,6 @@ void removeStreamTasksInBuf(SStreamObj* pStream, SStreamExecNodeInfo* pExecNode)
 int32_t mndProcessStreamHb(SRpcMsg *pReq) {
   SMnode *pMnode = pReq->info.node;
   SStreamHbMsg req = {0};
-  int32_t code = TSDB_CODE_SUCCESS;
 
   SDecoder decoder = {0};
   tDecoderInit(&decoder, pReq->pCont, pReq->contLen);
|
@ -2327,11 +2415,29 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (p->stage != pEntry->stage && pEntry->stage != -1) {
|
||||||
|
int32_t numOfNodes = taosArrayGetSize(execNodeList.pNodeEntryList);
|
||||||
|
for(int32_t j = 0; j < numOfNodes; ++j) {
|
||||||
|
SNodeEntry* pNodeEntry = taosArrayGet(execNodeList.pNodeEntryList, j);
|
||||||
|
if (pNodeEntry->nodeId == pEntry->nodeId) {
|
||||||
|
mInfo("vgId:%d stage updated, from %d to %d, nodeUpdate trigger by s-task:0x%" PRIx64,
|
||||||
|
pEntry->nodeId, pEntry->stage, p->stage, pEntry->id.taskId);
|
||||||
|
|
||||||
|
pNodeEntry->stageUpdated = true;
|
||||||
|
pEntry->stage = p->stage;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pEntry->stage = p->stage;
|
||||||
|
}
|
||||||
|
|
||||||
pEntry->status = p->status;
|
pEntry->status = p->status;
|
||||||
if (p->status != TASK_STATUS__NORMAL) {
|
if (p->status != TASK_STATUS__NORMAL) {
|
||||||
mDebug("received s-task:0x%"PRIx64" not in ready status:%s", p->id.taskId, streamGetTaskStatusStr(p->status));
|
mDebug("received s-task:0x%"PRIx64" not in ready status:%s", p->id.taskId, streamGetTaskStatusStr(p->status));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
taosThreadMutexUnlock(&execNodeList.lock);
|
taosThreadMutexUnlock(&execNodeList.lock);
|
||||||
|
|
||||||
taosArrayDestroy(req.pTaskStatus);
|
taosArrayDestroy(req.pTaskStatus);
|
||||||
|
|
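The heartbeat handler above treats stage == -1 as a "never reported" sentinel (the value seeded when the entry is first buffered in keepStreamTasksInBuf), so only a change from a previously known stage marks the node as updated. A simplified standalone sketch of that compare-and-mark pattern (the types and names are stand-ins, not the actual mnode code):

```
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t stage; } STaskEntry;  // simplified stand-in
typedef struct { bool stageUpdated; } SNode;   // simplified stand-in

// Returns true when the reported stage differs from a previously known one.
static bool updateStage(STaskEntry *e, SNode *n, int32_t reported) {
  if (e->stage != reported && e->stage != -1) {  // -1 means "never reported"
    n->stageUpdated = true;  // flag the node so the next node check triggers an update
    e->stage = reported;
    return true;
  }
  e->stage = reported;  // first report: just record it
  return false;
}

int main(void) {
  STaskEntry e = {.stage = -1};
  SNode n = {0};
  printf("%d\n", updateStage(&e, &n, 3));  // 0: first report, nothing flagged
  printf("%d\n", updateStage(&e, &n, 4));  // 1: stage changed, node flagged
  return 0;
}
```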
|
@ -37,11 +37,8 @@ void sndEnqueueStreamDispatch(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||||
|
|
||||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.taskId);
|
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.taskId);
|
||||||
if (pTask) {
|
if (pTask) {
|
||||||
SRpcMsg rsp = {
|
SRpcMsg rsp = { .info = pMsg->info, .code = 0 };
|
||||||
.info = pMsg->info,
|
streamProcessDispatchMsg(pTask, &req, &rsp);
|
||||||
.code = 0,
|
|
||||||
};
|
|
||||||
streamProcessDispatchMsg(pTask, &req, &rsp, false);
|
|
||||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||||
rpcFreeCont(pMsg->pCont);
|
rpcFreeCont(pMsg->pCont);
|
||||||
taosFreeQitem(pMsg);
|
taosFreeQitem(pMsg);
|
||||||
|
@@ -115,18 +112,16 @@ SSnode *sndOpen(const char *path, const SSnodeOpt *pOption) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     goto FAIL;
   }
-  pSnode->msgCb = pOption->msgCb;
 
+  pSnode->msgCb = pOption->msgCb;
   pSnode->pMeta = streamMetaOpen(path, pSnode, (FTaskExpand *)sndExpandTask, SNODE_HANDLE, -1);
   if (pSnode->pMeta == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     goto FAIL;
   }
 
-  // todo fix it: send msg to mnode to rollback to an existed checkpoint, and broadcast the rollback msg to all other
-  // computing nodes.
-  pSnode->pMeta->stage = 0;
+  // todo fix it: send msg to mnode to rollback to an existed checkpoint
+  streamMetaInitForSnode(pSnode->pMeta);
 
   return pSnode;
 
 FAIL:
@@ -228,7 +223,7 @@ int32_t sndProcessTaskDispatchReq(SSnode *pSnode, SRpcMsg *pMsg, bool exec) {
   SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.taskId);
   if (pTask) {
     SRpcMsg rsp = {.info = pMsg->info, .code = 0};
-    streamProcessDispatchMsg(pTask, &req, &rsp, exec);
+    streamProcessDispatchMsg(pTask, &req, &rsp);
     streamMetaReleaseTask(pSnode->pMeta, pTask);
     return 0;
   } else {
@@ -261,10 +256,11 @@ int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) {
 int32_t sndProcessTaskDispatchRsp(SSnode *pSnode, SRpcMsg *pMsg) {
   SStreamDispatchRsp *pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
 
-  int32_t taskId = htonl(pRsp->upstreamTaskId);
-  int64_t streamId = htobe64(pRsp->streamId);
+  pRsp->upstreamTaskId = htonl(pRsp->upstreamTaskId);
+  pRsp->streamId = htobe64(pRsp->streamId);
+  pRsp->msgId = htonl(pRsp->msgId);
 
-  SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, streamId, taskId);
+  SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, pRsp->streamId, pRsp->upstreamTaskId);
   if (pTask) {
     streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
     streamMetaReleaseTask(pSnode->pMeta, pTask);
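The rewritten response handler converts the wire-format fields in place instead of copying them into locals, so every later reader of pRsp (including streamProcessDispatchRsp) sees host byte order. This is the standard network-to-host conversion pattern; a self-contained sketch below, with an illustrative struct (note the TDengine code calls htonl when decoding, which for these fixed-width byte-swap macros is symmetric with ntohl on common platforms):

```
#include <arpa/inet.h>  // htonl / ntohl
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t taskId; int32_t msgId; } SWireRsp;  // illustrative only

int main(void) {
  SWireRsp rsp = {.taskId = htonl(7), .msgId = htonl(42)};  // as received off the wire
  // In-place conversion: after this point every reader sees host byte order.
  rsp.taskId = ntohl(rsp.taskId);
  rsp.msgId = ntohl(rsp.msgId);
  printf("taskId=%d msgId=%d\n", rsp.taskId, rsp.msgId);  // taskId=7 msgId=42
  return 0;
}
```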
@ -361,7 +357,7 @@ int32_t sndProcessStreamTaskCheckReq(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||||
qDebug("s-task:%s status:%s, recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), ready:%d",
|
qDebug("s-task:%s status:%s, recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), ready:%d",
|
||||||
pTask->id.idStr, pStatus, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
pTask->id.idStr, pStatus, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
||||||
} else {
|
} else {
|
||||||
rsp.status = 0;
|
rsp.status = TASK_DOWNSTREAM_NOT_READY;
|
||||||
qDebug("recv task check(taskId:0x%x not built yet) req(reqId:0x%" PRIx64 ") from task:0x%x (vgId:%d), rsp status %d",
|
qDebug("recv task check(taskId:0x%x not built yet) req(reqId:0x%" PRIx64 ") from task:0x%x (vgId:%d), rsp status %d",
|
||||||
taskId, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
taskId, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
||||||
}
|
}
|
||||||
|
|
|
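The `sndProcessTaskDispatchRsp` change converts the wire fields in place instead of copying them into locals, so every later reader of `pRsp` sees host byte order and no field can be swapped twice. A minimal sketch of that pattern; the struct and field names here are illustrative, not the actual TDengine definitions:

```c
#include <endian.h>     // be64toh on Linux; TDengine wraps this in its own os layer
#include <arpa/inet.h>  // ntohl
#include <stdint.h>

typedef struct {
  int64_t streamId;  // big-endian on the wire
  int32_t taskId;    // big-endian on the wire
} WireRsp;

// Convert once, in place, immediately after receive: afterwards the struct
// holds host-order values, so callers never need to track which fields were
// already converted.
static void wireRspToHost(WireRsp *p) {
  p->streamId = (int64_t)be64toh((uint64_t)p->streamId);
  p->taskId = (int32_t)ntohl((uint32_t)p->taskId);
}
```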
```diff
@@ -39,7 +39,6 @@ extern "C" {
 #define tqInfo(...) do { if (tqDebugFlag & DEBUG_INFO) { taosPrintLog("TQ ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
 #define tqDebug(...) do { if (tqDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0)
 #define tqTrace(...) do { if (tqDebugFlag & DEBUG_TRACE) { taosPrintLog("TQ ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0)
 
 // clang-format on
 
 typedef struct STqOffsetStore STqOffsetStore;
@@ -224,8 +224,8 @@ int tqPushMsg(STQ*, tmsg_t msgType);
 int tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg);
 int tqUnregisterPushHandle(STQ* pTq, void* pHandle);
 int tqScanWalAsync(STQ* pTq, bool ckPause);
-int32_t tqProcessStreamCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg);
-int32_t tqProcessStreamTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg);
+int32_t tqProcessStreamCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp);
+int32_t tqProcessTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqCheckAndRunStreamTaskAsync(STQ* pTq);
@@ -249,8 +249,8 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msg
 int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
 int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
 int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
-int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg);
-int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg);
+int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg);
+int32_t tqProcessTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
 int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
```
```diff
@@ -876,13 +876,13 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
            " child id:%d, level:%d, status:%s fill-history:%d, related fill-task:0x%x trigger:%" PRId64 " ms",
            vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
            pTask->info.selfChildId, pTask->info.taskLevel, streamGetTaskStatusStr(pTask->status.taskStatus),
-           pTask->info.fillHistory, (int32_t)pTask->historyTaskId.taskId, pTask->info.triggerParam);
+           pTask->info.fillHistory, (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.triggerParam);
   }
 
   return 0;
 }
 
-int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
+int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
   char* msgStr = pMsg->pCont;
   char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
   int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
@@ -908,31 +908,31 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
   };
 
   // only the leader node handle the check request
-  if (!pMeta->leader) {
-    tqError("s-task:0x%x invalid check msg from upstream:0x%x(vgId:%d), vgId:%d is follower, not handle check msg",
+  if (pMeta->role == NODE_ROLE_FOLLOWER) {
+    tqError("s-task:0x%x invalid check msg from upstream:0x%x(vgId:%d), vgId:%d is follower, not handle check status msg",
             taskId, req.upstreamTaskId, req.upstreamNodeId, pMeta->vgId);
-    return -1;
-  }
+    rsp.status = TASK_DOWNSTREAM_NOT_LEADER;
+  } else {
     SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, taskId);
     if (pTask != NULL) {
       rsp.status = streamTaskCheckStatus(pTask, req.upstreamTaskId, req.upstreamNodeId, req.stage);
       streamMetaReleaseTask(pMeta, pTask);
 
       const char* pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
-      tqDebug("s-task:%s status:%s, stage:%d recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), ready:%d",
+      tqDebug("s-task:%s status:%s, stage:%d recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), check_status:%d",
               pTask->id.idStr, pStatus, rsp.oldStage, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
     } else {
-      rsp.status = 0;
+      rsp.status = TASK_DOWNSTREAM_NOT_READY;
       tqDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(reqId:0x%" PRIx64
-              ") from task:0x%x (vgId:%d), rsp status %d",
+              ") from task:0x%x (vgId:%d), rsp check_status %d",
               req.streamId, taskId, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
     }
+  }
 
   return streamSendCheckRsp(pMeta, &req, &rsp, &pMsg->info, taskId);
 }
 
-int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) {
+int32_t tqProcessTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) {
   char* pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
   int32_t len = pMsg->contLen - sizeof(SMsgHead);
   int32_t vgId = pTq->pStreamMeta->vgId;
```
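The reworked check handler always sends a response: "I am a follower" and "task not built yet" are encoded as explicit status values (`TASK_DOWNSTREAM_NOT_LEADER`, `TASK_DOWNSTREAM_NOT_READY`) instead of returning -1 and leaving the upstream task to time out. A compact sketch of that shape, with illustrative enum values standing in for the real `TASK_DOWNSTREAM_*` constants:

```c
typedef enum {
  DOWNSTREAM_READY = 0,
  DOWNSTREAM_NOT_READY = 1,   // task exists but is still initializing
  DOWNSTREAM_NOT_LEADER = 2,  // this vnode is a follower; upstream must re-resolve the leader
} EDownstreamStatus;

// Always produce a reply, so the upstream caller can distinguish "retry
// later" from "contact a different node" instead of inferring failure
// from a timeout.
static EDownstreamStatus checkDownstream(int isLeader, const void *pTask) {
  if (!isLeader) return DOWNSTREAM_NOT_LEADER;
  if (pTask == NULL) return DOWNSTREAM_NOT_READY;
  return DOWNSTREAM_READY;
}
```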
```diff
@@ -1025,8 +1025,8 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
 
   bool restored = pTq->pVnode->restored;
   if (p != NULL && restored) {
-    p->taskExecInfo.init = taosGetTimestampMs();
-    tqDebug("s-task:%s set the init ts:%"PRId64, p->id.idStr, p->taskExecInfo.init);
+    p->execInfo.init = taosGetTimestampMs();
+    tqDebug("s-task:%s set the init ts:%"PRId64, p->id.idStr, p->execInfo.init);
 
     streamTaskCheckDownstream(p);
   } else if (!restored) {
@@ -1064,14 +1064,14 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
   const char* pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
   tqDebug("s-task:%s start scan-history stage(step 1), status:%s", id, pStatus);
 
-  if (pTask->taskExecInfo.step1Start == 0) {
+  if (pTask->execInfo.step1Start == 0) {
     ASSERT(pTask->status.pauseAllowed == false);
-    pTask->taskExecInfo.step1Start = taosGetTimestampMs();
+    pTask->execInfo.step1Start = taosGetTimestampMs();
     if (pTask->info.fillHistory == 1) {
       streamTaskEnablePause(pTask);
     }
   } else {
-    tqDebug("s-task:%s resume from paused, start ts:%" PRId64, pTask->id.idStr, pTask->taskExecInfo.step1Start);
+    tqDebug("s-task:%s resume from paused, start ts:%" PRId64, pTask->id.idStr, pTask->execInfo.step1Start);
   }
 
   // we have to continue retrying to successfully execute the scan history task.
@@ -1091,7 +1091,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
 
   streamScanHistoryData(pTask);
   if (pTask->status.taskStatus == TASK_STATUS__PAUSE) {
-    double el = (taosGetTimestampMs() - pTask->taskExecInfo.step1Start) / 1000.0;
+    double el = (taosGetTimestampMs() - pTask->execInfo.step1Start) / 1000.0;
     int8_t status = streamTaskSetSchedStatusInActive(pTask);
     tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs, sched-status:%d", pTask->id.idStr, el, status);
     streamMetaReleaseTask(pMeta, pTask);
@@ -1099,7 +1099,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
   }
 
   // the following procedure should be executed, no matter status is stop/pause or not
-  double el = (taosGetTimestampMs() - pTask->taskExecInfo.step1Start) / 1000.0;
+  double el = (taosGetTimestampMs() - pTask->execInfo.step1Start) / 1000.0;
   tqDebug("s-task:%s scan-history stage(step 1) ended, elapsed time:%.2fs", id, el);
 
   if (pTask->info.fillHistory) {
```
```diff
@@ -1135,21 +1135,65 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
     // now we can stop the stream task execution
     int64_t latestVer = 0;
 
-    taosThreadMutexLock(&pStreamTask->lock);
-    streamTaskHalt(pStreamTask);
+    while (1) {
+      taosThreadMutexLock(&pStreamTask->lock);
+      int8_t status = pStreamTask->status.taskStatus;
+      if (status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP) {
+        // return;
+        // do nothing
+      }
+
+      if (status == TASK_STATUS__HALT) {
+        // tqDebug("s-task:%s level:%d sched-status:%d is halt by fill-history task:%s", pStreamTask->id.idStr,
+        //         pStreamTask->info.taskLevel, pStreamTask->status.schedStatus, id);
+        // latestVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
+        //
+        // taosThreadMutexUnlock(&pStreamTask->lock);
+        // break;
+      }
+
+      if (pStreamTask->status.taskStatus == TASK_STATUS__CK) {
+        qDebug("s-task:%s status:%s during generating checkpoint, wait for 1sec and retry set status:halt",
+               pStreamTask->id.idStr, streamGetTaskStatusStr(TASK_STATUS__CK));
+        taosThreadMutexUnlock(&pStreamTask->lock);
+        taosMsleep(1000);
+        continue;
+      }
+
+      // upgrade to halt status
+      if (status == TASK_STATUS__PAUSE) {
+        qDebug("s-task:%s upgrade status to %s from %s", pStreamTask->id.idStr, streamGetTaskStatusStr(TASK_STATUS__HALT),
+               streamGetTaskStatusStr(TASK_STATUS__PAUSE));
+      } else {
+        qDebug("s-task:%s halt task, prev status:%s", pStreamTask->id.idStr, streamGetTaskStatusStr(status));
+      }
+
+      pStreamTask->status.keepTaskStatus = status;
+      pStreamTask->status.taskStatus = TASK_STATUS__HALT;
 
       tqDebug("s-task:%s level:%d sched-status:%d is halt by fill-history task:%s", pStreamTask->id.idStr,
               pStreamTask->info.taskLevel, pStreamTask->status.schedStatus, id);
       latestVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
 
       taosThreadMutexUnlock(&pStreamTask->lock);
+      break;
+    }
 
     // if it's an source task, extract the last version in wal.
     pRange = &pTask->dataRange.range;
     done = streamHistoryTaskSetVerRangeStep2(pTask, latestVer);
 
     if (done) {
-      pTask->taskExecInfo.step2Start = taosGetTimestampMs();
+      pTask->execInfo.step2Start = taosGetTimestampMs();
       qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, 0.0);
       streamTaskPutTranstateIntoInputQ(pTask);
 
+      if (pTask->status.taskStatus == TASK_STATUS__PAUSE) {
+        pTask->status.keepTaskStatus = TASK_STATUS__NORMAL;
+        qDebug("s-task:%s prev status is %s, update the kept status to be:%s when after step 2", id,
+               streamGetTaskStatusStr(TASK_STATUS__PAUSE), streamGetTaskStatusStr(pTask->status.keepTaskStatus));
+      }
+
       streamTryExec(pTask);  // exec directly
     } else {
       STimeWindow* pWindow = &pTask->dataRange.window;
```
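The new `while (1)` block is a retry loop: if the stream task is mid-checkpoint (`TASK_STATUS__CK`), the lock is dropped, the thread sleeps a second, and the halt attempt is retried, so the checkpoint is never cut short and the previous status is remembered for later resumption. A stripped-down sketch of the same lock/inspect/back-off shape, with hypothetical status values standing in for the real task state machine:

```c
#include <pthread.h>
#include <unistd.h>

enum { ST_NORMAL, ST_CHECKPOINTING, ST_HALTED };

typedef struct {
  pthread_mutex_t lock;
  int             status;
  int             keepStatus;  // status to restore once the halt is lifted
} Task;

// Block until the task can be moved to ST_HALTED without interrupting a
// checkpoint. The mutex is never held across the sleep.
static void haltWhenSafe(Task *t) {
  for (;;) {
    pthread_mutex_lock(&t->lock);
    if (t->status != ST_CHECKPOINTING) {
      t->keepStatus = t->status;  // remember what to resume into
      t->status = ST_HALTED;
      pthread_mutex_unlock(&t->lock);
      return;
    }
    pthread_mutex_unlock(&t->lock);
    sleep(1);                     // back off, then re-check
  }
}
```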
```diff
@@ -1159,7 +1203,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
             pStreamTask->id.idStr);
     ASSERT(pTask->status.schedStatus == TASK_SCHED_STATUS__WAITING);
 
-    pTask->taskExecInfo.step2Start = taosGetTimestampMs();
+    pTask->execInfo.step2Start = taosGetTimestampMs();
     streamSetParamForStreamScannerStep2(pTask, pRange, pWindow);
 
     int64_t dstVer = pTask->dataRange.range.minVer;
@@ -1183,7 +1227,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
   } else {
     STimeWindow* pWindow = &pTask->dataRange.window;
 
-    if (pTask->historyTaskId.taskId == 0) {
+    if (pTask->hTaskInfo.id.taskId == 0) {
       *pWindow = (STimeWindow){INT64_MIN, INT64_MAX};
       tqDebug(
           "s-task:%s scan-history in stream time window completed, no related fill-history task, reset the time "
@@ -1324,10 +1368,12 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
   tDecodeStreamDispatchReq(&decoder, &req);
   tDecoderClear(&decoder);
 
+  tqDebug("s-task:0x%x recv dispatch msg from 0x%x(vgId:%d)", req.taskId, req.upstreamTaskId, req.upstreamNodeId);
+
   SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.taskId);
   if (pTask) {
     SRpcMsg rsp = {.info = pMsg->info, .code = 0};
-    streamProcessDispatchMsg(pTask, &req, &rsp, exec);
+    streamProcessDispatchMsg(pTask, &req, &rsp);
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
     return 0;
   } else {
```
```diff
@@ -1346,6 +1392,8 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
   pRsp->streamId = htobe64(pRsp->streamId);
   pRsp->downstreamTaskId = htonl(pRsp->downstreamTaskId);
   pRsp->downstreamNodeId = htonl(pRsp->downstreamNodeId);
+  pRsp->stage = htobe64(pRsp->stage);
+  pRsp->msgId = htonl(pRsp->msgId);
 
   SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pRsp->streamId, pRsp->upstreamTaskId);
   if (pTask) {
@@ -1393,12 +1441,12 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
   streamTaskPause(pTask, pMeta);
 
   SStreamTask* pHistoryTask = NULL;
-  if (pTask->historyTaskId.taskId != 0) {
-    pHistoryTask = streamMetaAcquireTask(pMeta, pTask->historyTaskId.streamId, pTask->historyTaskId.taskId);
+  if (pTask->hTaskInfo.id.taskId != 0) {
+    pHistoryTask = streamMetaAcquireTask(pMeta, pTask->hTaskInfo.id.streamId, pTask->hTaskInfo.id.taskId);
     if (pHistoryTask == NULL) {
       tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%" PRIx64
               ", it may have been dropped already",
-              pMeta->vgId, pTask->historyTaskId.taskId);
+              pMeta->vgId, pTask->hTaskInfo.id.taskId);
       streamMetaReleaseTask(pMeta, pTask);
 
       // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active
@@ -1467,7 +1515,7 @@ int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
   }
 
   SStreamTask* pHistoryTask =
-      streamMetaAcquireTask(pTq->pStreamMeta, pTask->historyTaskId.streamId, pTask->historyTaskId.taskId);
+      streamMetaAcquireTask(pTq->pStreamMeta, pTask->hTaskInfo.id.streamId, pTask->hTaskInfo.id.taskId);
   if (pHistoryTask) {
     code = tqProcessTaskResumeImpl(pTq, pHistoryTask, sversion, pReq->igUntreated);
   }
@@ -1528,10 +1576,16 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
   int32_t taskId = req.taskId;
   tqDebug("vgId:%d receive dispatch msg to s-task:0x%" PRIx64 "-0x%x", vgId, req.streamId, taskId);
 
+  // for test purpose
+  // if (req.type == STREAM_INPUT__CHECKPOINT_TRIGGER) {
+  //   code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
+  //   goto FAIL;
+  // }
+
   SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, taskId);
   if (pTask != NULL) {
     SRpcMsg rsp = {.info = pMsg->info, .code = 0};
-    streamProcessDispatchMsg(pTask, &req, &rsp, false);
+    streamProcessDispatchMsg(pTask, &req, &rsp);
     streamMetaReleaseTask(pTq->pStreamMeta, pTask);
     rpcFreeCont(pMsg->pCont);
     taosFreeQitem(pMsg);
@@ -1559,12 +1613,16 @@ FAIL:
   }
 
   pRspHead->vgId = htonl(req.upstreamNodeId);
+  ASSERT(pRspHead->vgId != 0);
 
   SStreamDispatchRsp* pRsp = POINTER_SHIFT(pRspHead, sizeof(SMsgHead));
   pRsp->streamId = htobe64(req.streamId);
   pRsp->upstreamTaskId = htonl(req.upstreamTaskId);
   pRsp->upstreamNodeId = htonl(req.upstreamNodeId);
   pRsp->downstreamNodeId = htonl(pVnode->config.vgId);
   pRsp->downstreamTaskId = htonl(req.taskId);
+  pRsp->msgId = htonl(req.msgId);
+  pRsp->stage = htobe64(req.stage);
   pRsp->inputStatus = TASK_OUTPUT_STATUS__NORMAL;
 
   int32_t len = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp);
```
```diff
@@ -1578,16 +1636,22 @@ FAIL:
 }
 
 // todo error code cannot be return, since this is invoked by an mnode-launched transaction.
-int32_t tqProcessStreamCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg) {
+int32_t tqProcessStreamCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) {
   int32_t vgId = TD_VID(pTq->pVnode);
   SStreamMeta* pMeta = pTq->pStreamMeta;
   char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
   int32_t len = pMsg->contLen - sizeof(SMsgHead);
   int32_t code = 0;
 
+  // disable auto rsp to source
+  pRsp->info.handle = NULL;
+
   SStreamCheckpointSourceReq req = {0};
   if (!vnodeIsRoleLeader(pTq->pVnode)) {
-    tqDebug("vgId:%d not leader node, ignore checkpoint-source msg", vgId);
+    tqDebug("vgId:%d not leader, ignore checkpoint-source msg", vgId);
+    SRpcMsg rsp = {0};
+    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
+    tmsgSendRsp(&rsp);  // error occurs
     return TSDB_CODE_SUCCESS;
   }
 
@@ -1597,6 +1661,9 @@ int32_t tqProcessStreamCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg) {
     code = TSDB_CODE_MSG_DECODE_ERROR;
     tDecoderClear(&decoder);
     tqError("vgId:%d failed to decode checkpoint-source msg, code:%s", vgId, tstrerror(code));
+    SRpcMsg rsp = {0};
+    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
+    tmsgSendRsp(&rsp);  // error occurs
     return code;
   }
   tDecoderClear(&decoder);
@@ -1605,6 +1672,9 @@ int32_t tqProcessStreamCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg) {
   if (pTask == NULL) {
     tqError("vgId:%d failed to find s-task:0x%x, ignore checkpoint msg. it may have been destroyed already", vgId,
             req.taskId);
+    SRpcMsg rsp = {0};
+    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
+    tmsgSendRsp(&rsp);  // error occurs
     return TSDB_CODE_SUCCESS;
   }
 
@@ -1620,6 +1690,22 @@ int32_t tqProcessStreamCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg) {
     return TSDB_CODE_SUCCESS;
   }
 
+  taosThreadMutexLock(&pTask->lock);
+  if (pTask->status.taskStatus == TASK_STATUS__HALT) {
+    qError("s-task:%s not ready for checkpoint, since it is halt, ignore this checkpoint:%" PRId64 ", set it failure",
+           pTask->id.idStr, req.checkpointId);
+    taosThreadMutexUnlock(&pTask->lock);
+
+    streamMetaReleaseTask(pMeta, pTask);
+
+    SRpcMsg rsp = {0};
+    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
+    tmsgSendRsp(&rsp);  // error occurs
+    return TSDB_CODE_SUCCESS;
+  }
+  streamProcessCheckpointSourceReq(pTask, &req);
+  taosThreadMutexUnlock(&pTask->lock);
 
   int32_t total = 0;
   taosWLockLatch(&pMeta->lock);
@@ -1632,23 +1718,25 @@ int32_t tqProcessStreamCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg) {
   total = pMeta->numOfStreamTasks;
   taosWUnLockLatch(&pMeta->lock);
 
-  qDebug("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg, chkpt:%" PRId64 ", total checkpoint req:%d",
+  qInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", total checkpoint reqs:%d",
         pTask->id.idStr, vgId, pTask->info.taskLevel, req.checkpointId, total);
 
   code = streamAddCheckpointSourceRspMsg(&req, &pMsg->info, pTask, 1);
   if (code != TSDB_CODE_SUCCESS) {
+    SRpcMsg rsp = {0};
+    buildCheckpointSourceRsp(&req, &pMsg->info, &rsp, 0);
+    tmsgSendRsp(&rsp);  // error occurs
     return code;
   }
 
   // todo: when generating checkpoint, no new tasks are allowed to add into current Vnode
   // todo: when generating checkpoint, leader of mnode has transfer to other DNode?
-  streamProcessCheckpointSourceReq(pTask, &req);
   streamMetaReleaseTask(pMeta, pTask);
   return code;
 }
 
 // downstream task has complete the stream task checkpoint procedure, let's start the handle the rsp by execute task
-int32_t tqProcessStreamTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg) {
+int32_t tqProcessTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg) {
   int32_t vgId = TD_VID(pTq->pVnode);
   SStreamMeta* pMeta = pTq->pStreamMeta;
   char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
```
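The checkpoint-source handler now suppresses the framework's automatic reply (`pRsp->info.handle = NULL`) and instead builds an explicit response on every early-return path, so the mnode-launched transaction always gets an answer even when the request is rejected. A sketch of that "reply on every exit path" shape; the types and helpers below are stand-ins, not the actual TDengine rpc structures:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

// Illustrative stand-ins for the rpc types.
typedef struct { void *handle; } SRpcInfoSketch;
typedef struct { SRpcInfoSketch info; int32_t code; } SRpcMsgSketch;

static void sendRspSketch(SRpcMsgSketch *pRsp) { (void)pRsp; /* hand off to the transport layer */ }

// Suppress the automatic reply, then answer explicitly on each exit path
// so the caller never waits on a silent failure.
static int32_t handleCheckpointSourceSketch(SRpcMsgSketch *pMsg, SRpcMsgSketch *pAutoRsp, bool ok) {
  pAutoRsp->info.handle = NULL;  // disable auto rsp to source
  if (!ok) {
    SRpcMsgSketch rsp = {.info = pMsg->info, .code = 0};
    sendRspSketch(&rsp);         // explicit reply on the failure path
    return 0;
  }
  /* ... normal processing; the success reply is queued after the checkpoint ... */
  return 0;
}
```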
```diff
@@ -1722,8 +1810,8 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) {
   streamSetStatusNormal(pTask);
 
   SStreamTask** ppHTask = NULL;
-  if (pTask->historyTaskId.taskId != 0) {
-    ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->historyTaskId, sizeof(pTask->historyTaskId));
+  if (pTask->hTaskInfo.id.taskId != 0) {
+    ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
     if (ppHTask == NULL || *ppHTask == NULL) {
       tqError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already",
               pMeta->vgId, req.taskId);
@@ -1760,16 +1848,19 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) {
   // possibly only handle the stream task.
   int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
   int32_t updateTasks = taosHashGetSize(pMeta->pUpdateTaskSet);
-  if (updateTasks < numOfTasks) {
-    pMeta->taskWillbeLaunched = 1;
 
-    tqDebug("vgId:%d closed tasks:%d, unclosed:%d", vgId, updateTasks, (numOfTasks - updateTasks));
+  pMeta->startInfo.startedAfterNodeUpdate = 1;
+
+  if (updateTasks < numOfTasks) {
+    tqDebug("vgId:%d closed tasks:%d, unclosed:%d, all tasks will be started when nodeEp update completed", vgId,
+            updateTasks, (numOfTasks - updateTasks));
     taosWUnLockLatch(&pMeta->lock);
   } else {
     taosHashClear(pMeta->pUpdateTaskSet);
 
     if (!pTq->pVnode->restored) {
-      tqDebug("vgId:%d vnode restore not completed, not restart the tasks", vgId);
+      tqDebug("vgId:%d vnode restore not completed, not restart the tasks, clear the start after nodeUpdate flag", vgId);
+      pMeta->startInfo.startedAfterNodeUpdate = 0;
       taosWUnLockLatch(&pMeta->lock);
     } else {
       tqDebug("vgId:%d tasks are all updated and stopped, restart them", vgId);
@@ -1791,14 +1882,13 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) {
       }
 
       if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) {
-        vInfo("vgId:%d, restart all stream tasks", vgId);
+        vInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);
        tqStartStreamTasks(pTq);
        tqCheckAndRunStreamTaskAsync(pTq);
      } else {
        vInfo("vgId:%d, follower node not start stream tasks", vgId);
      }
 
-      pMeta->taskWillbeLaunched = 0;
       taosWUnLockLatch(&pMeta->lock);
     }
   }
```
```diff
@@ -24,23 +24,22 @@ typedef struct STableSinkInfo {
   tstr name;
 } STableSinkInfo;
 
+static int32_t tsAscendingSortFn(const void* p1, const void* p2);
 static int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDataBlock, char* stbFullName,
                                   SSubmitTbData* pTableData);
 static int32_t setDstTableDataPayload(SStreamTask* pTask, int32_t blockIndex, SSDataBlock* pDataBlock,
                                       SSubmitTbData* pTableData);
 static int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
                                        int64_t suid);
-static int32_t tqBuildSubmitReq(SSubmitReq2* pSubmitReq, int32_t vgId, void** pMsg, int32_t* msgLen);
-static int32_t tsAscendingSortFn(const void* p1, const void* p2);
+static int32_t doBuildAndSendSubmitMsg(SVnode* pVnode, SStreamTask* pTask, SSubmitReq2* pReq, int32_t numOfBlocks);
+static int32_t buildSubmitMsgImpl(SSubmitReq2* pSubmitReq, int32_t vgId, void** pMsg, int32_t* msgLen);
 static int32_t doConvertRows(SSubmitTbData* pTableData, STSchema* pTSchema, SSDataBlock* pDataBlock, const char* id);
 static int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkInfo* pTableSinkInfo,
                                         const char* dstTableName, int64_t* uid);
 static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id);
-static SVCreateTbReq* buildAutoCreateTableReq(char* stbFullName, int64_t suid, int32_t numOfCols,
-                                              SSDataBlock* pDataBlock);
-static bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbName, int64_t suid);
 static int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, const char* id);
-static int32_t doBuildAndSendSubmitMsg(SVnode* pVnode, SStreamTask* pTask, SSubmitReq2* pReq, int32_t numOfBlocks);
+static bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbName, int64_t suid);
+static SVCreateTbReq* buildAutoCreateTableReq(char* stbFullName, int64_t suid, int32_t numOfCols, SSDataBlock* pDataBlock);
 
 int32_t tqBuildDeleteReq(const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
                          const char* pIdStr) {
@@ -255,7 +254,7 @@ int32_t doBuildAndSendSubmitMsg(SVnode* pVnode, SStreamTask* pTask, SSubmitReq2*
   void* pBuf = NULL;
   int32_t numOfFinalBlocks = taosArrayGetSize(pReq->aSubmitTbData);
 
-  int32_t code = tqBuildSubmitReq(pReq, vgId, &pBuf, &len);
+  int32_t code = buildSubmitMsgImpl(pReq, vgId, &pBuf, &len);
   if (code != TSDB_CODE_SUCCESS) {
     tqError("s-task:%s build submit msg failed, vgId:%d, code:%s", id, vgId, tstrerror(code));
     return code;
@@ -270,14 +269,15 @@ int32_t doBuildAndSendSubmitMsg(SVnode* pVnode, SStreamTask* pTask, SSubmitReq2*
     tqError("s-task:%s failed to put into write-queue since %s", id, terrstr());
   }
 
-  pTask->sinkRecorder.numOfSubmit += 1;
+  SSinkRecorder* pRec = &pTask->execInfo.sink;
 
-  if ((pTask->sinkRecorder.numOfSubmit % 5000) == 0) {
-    SSinkTaskRecorder* pRec = &pTask->sinkRecorder;
-    double el = (taosGetTimestampMs() - pTask->taskExecInfo.start) / 1000.0;
+  pRec->numOfSubmit += 1;
+  if ((pRec->numOfSubmit % 1000) == 0) {
+    double el = (taosGetTimestampMs() - pTask->execInfo.start) / 1000.0;
     tqInfo("s-task:%s vgId:%d write %" PRId64 " blocks (%" PRId64 " rows) in %" PRId64
-           " submit into dst table, duration:%.2f Sec.",
-           pTask->id.idStr, vgId, pRec->numOfBlocks, pRec->numOfRows, pRec->numOfSubmit, el);
+           " submit into dst table, %.2fMiB duration:%.2f Sec.",
+           pTask->id.idStr, vgId, pRec->numOfBlocks, pRec->numOfRows, pRec->numOfSubmit, SIZE_IN_MiB(pRec->dataSize),
+           el);
   }
 
   return TSDB_CODE_SUCCESS;
```
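The recorder change shortens the logging interval (every 1000 submits instead of 5000) and adds the accumulated payload size to the throughput line. The underlying pattern is counter-gated logging; a self-contained sketch with an illustrative stats struct (the real code uses `SSinkRecorder` and the `SIZE_IN_MiB` macro):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef struct {
  int64_t numOfSubmit;
  int64_t numOfRows;
  int64_t dataSize;  // bytes accumulated so far
  int64_t startMs;   // set when the sink first runs
} SinkStats;

static int64_t nowMs(void) { return (int64_t)time(NULL) * 1000; }

// Log a rate line every N submits rather than on every call, keeping the
// hot path cheap while still emitting a periodic throughput signal.
static void recordSubmit(SinkStats *s, int64_t rows, int64_t bytes) {
  s->numOfRows += rows;
  s->dataSize += bytes;
  if ((++s->numOfSubmit % 1000) == 0) {
    double elapsedSec = (nowMs() - s->startMs) / 1000.0;
    printf("%" PRId64 " submits, %" PRId64 " rows, %.2f MiB in %.2fs\n",
           s->numOfSubmit, s->numOfRows, s->dataSize / 1048576.0, elapsedSec);
  }
}
```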
```diff
@@ -327,7 +327,7 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c
   taosArrayDestroy(pExisted->aRowP);
   pExisted->aRowP = pFinal;
 
-  tqDebug("s-task:%s rows merged, final rows:%d, uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", id,
+  tqTrace("s-task:%s rows merged, final rows:%d, uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", id,
           (int32_t)taosArrayGetSize(pFinal), pExisted->uid, (pExisted->pCreateTbReq != NULL), (pNew->pCreateTbReq != NULL));
   return TSDB_CODE_SUCCESS;
 }
@@ -462,7 +462,7 @@ int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo,
   return code;
 }
 
-int32_t tqBuildSubmitReq(SSubmitReq2* pSubmitReq, int32_t vgId, void** pMsg, int32_t* msgLen) {
+int32_t buildSubmitMsgImpl(SSubmitReq2* pSubmitReq, int32_t vgId, void** pMsg, int32_t* msgLen) {
   int32_t code = 0;
   void* pBuf = NULL;
   *msgLen = 0;
@@ -672,10 +672,10 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
     pTableData->uid = pTableSinkInfo->uid;
 
     if (pTableData->uid == 0) {
-      tqDebug("s-task:%s cached tableInfo uid is invalid, acquire it from meta", id);
+      tqTrace("s-task:%s cached tableInfo uid is invalid, acquire it from meta", id);
       return doWaitForDstTableCreated(pVnode, pTask, pTableSinkInfo, dstTableName, &pTableData->uid);
     } else {
-      tqDebug("s-task:%s set the dstTable uid from cache:%"PRId64, id, pTableData->uid);
+      tqTrace("s-task:%s set the dstTable uid from cache:%"PRId64, id, pTableData->uid);
     }
   } else {
     // The auto-create option will always set to be open for those submit messages, which arrive during the period
@@ -740,7 +740,7 @@ int32_t setDstTableDataPayload(SStreamTask* pTask, int32_t blockIndex, SSDataBlo
   }
 
   taosArraySort(pTableData->aRowP, tsAscendingSortFn);
-  tqDebug("s-task:%s build submit msg for dstTable:%s, numOfRows:%d", id, dstTableName, numOfRows);
+  tqTrace("s-task:%s build submit msg for dstTable:%s, numOfRows:%d", id, dstTableName, numOfRows);
   return code;
 }
 
@@ -755,8 +755,8 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
   int32_t code = TSDB_CODE_SUCCESS;
   const char* id = pTask->id.idStr;
 
-  if (pTask->taskExecInfo.start == 0) {
-    pTask->taskExecInfo.start = taosGetTimestampMs();
+  if (pTask->execInfo.start == 0) {
+    pTask->execInfo.start = taosGetTimestampMs();
   }
 
   bool onlySubmitData = true;
@@ -785,7 +785,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
     } else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
       continue;
     } else {
-      pTask->sinkRecorder.numOfBlocks += 1;
+      pTask->execInfo.sink.numOfBlocks += 1;
 
       SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
       if (submitReq.aSubmitTbData == NULL) {
@@ -824,6 +824,8 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
     bool hasSubmit = false;
     for (int32_t i = 0; i < numOfBlocks; i++) {
       if (streamTaskShouldStop(&pTask->status)) {
+        taosHashCleanup(pTableIndexMap);
+        tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
         return;
       }
 
@@ -833,7 +835,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
       }
 
       hasSubmit = true;
-      pTask->sinkRecorder.numOfBlocks += 1;
+      pTask->execInfo.sink.numOfBlocks += 1;
       uint64_t groupId = pDataBlock->info.id.groupId;
 
       SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version};
@@ -867,7 +869,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
       }
     }
 
-    pTask->sinkRecorder.numOfRows += pDataBlock->info.rows;
+    pTask->execInfo.sink.numOfRows += pDataBlock->info.rows;
   }
 
   taosHashCleanup(pTableIndexMap);
```

```diff
@@ -105,6 +105,7 @@ int32_t streamStateSnapRead(SStreamStateReader* pReader, uint8_t** ppData) {
   pHdr->size = len;
   memcpy(pHdr->data, rowData, len);
   tqDebug("vgId:%d, vnode stream-state snapshot read data success", TD_VID(pReader->pTq->pVnode));
+  taosMemoryFree(rowData);
   return code;
 
 _err:
```
```diff
@@ -72,6 +72,8 @@ int32_t tqCheckAndRunStreamTask(STQ* pTq) {
   SArray* pTaskList = NULL;
   taosWLockLatch(&pMeta->lock);
   pTaskList = taosArrayDup(pMeta->pTaskList, NULL);
+  taosHashClear(pMeta->startInfo.pReadyTaskSet);
+  pMeta->startInfo.ts = taosGetTimestampMs();
   taosWUnLockLatch(&pMeta->lock);
 
   // broadcast the check downstream tasks msg
@@ -96,8 +98,8 @@ int32_t tqCheckAndRunStreamTask(STQ* pTq) {
       continue;
     }
 
-    pTask->taskExecInfo.init = taosGetTimestampMs();
-    tqDebug("s-task:%s start check downstream tasks, set the init ts:%"PRId64, pTask->id.idStr, pTask->taskExecInfo.init);
+    pTask->execInfo.init = taosGetTimestampMs();
+    tqDebug("s-task:%s start check downstream tasks, set the init ts:%"PRId64, pTask->id.idStr, pTask->execInfo.init);
 
     streamSetStatusNormal(pTask);
     streamTaskCheckDownstream(pTask);
@@ -231,7 +233,6 @@ int32_t tqStartStreamTasks(STQ* pTq) {
   int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
 
   tqDebug("vgId:%d start all %d stream task(s)", vgId, numOfTasks);
-
   if (numOfTasks == 0) {
     return TSDB_CODE_SUCCESS;
   }
@@ -306,7 +307,7 @@ void handleFillhistoryScanComplete(SStreamTask* pTask, int64_t ver) {
            ", not scan wal anymore, add transfer-state block into inputQ",
            id, ver, maxVer);
 
-    double el = (taosGetTimestampMs() - pTask->taskExecInfo.step2Start) / 1000.0;
+    double el = (taosGetTimestampMs() - pTask->execInfo.step2Start) / 1000.0;
     qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el);
     /*int32_t code = */streamTaskPutTranstateIntoInputQ(pTask);
     /*int32_t code = */streamSchedExec(pTask);
@@ -355,7 +356,7 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) {
 
     const char* pStatus = streamGetTaskStatusStr(status);
     if (status != TASK_STATUS__NORMAL) {
-      tqDebug("s-task:%s not ready for new submit block from wal, status:%s", pTask->id.idStr, pStatus);
+      tqTrace("s-task:%s not ready for new submit block from wal, status:%s", pTask->id.idStr, pStatus);
       streamMetaReleaseTask(pStreamMeta, pTask);
       continue;
     }
```

```diff
@@ -39,11 +39,17 @@ int32_t tqInitDataRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) {
 void tqUpdateNodeStage(STQ* pTq, bool isLeader) {
   SSyncState state = syncGetState(pTq->pVnode->sync);
   SStreamMeta* pMeta = pTq->pStreamMeta;
-  tqDebug("vgId:%d update the meta stage:%"PRId64", prev:%"PRId64" leader:%d", pMeta->vgId, state.term, pMeta->stage, isLeader);
+  int64_t stage = pMeta->stage;
 
   pMeta->stage = state.term;
-  pMeta->leader = isLeader;
+  pMeta->role = (isLeader)? NODE_ROLE_LEADER:NODE_ROLE_FOLLOWER;
   if (isLeader) {
+    tqInfo("vgId:%d update meta stage:%" PRId64 ", prev:%" PRId64 " leader:%d, start to send Hb", pMeta->vgId,
+           state.term, stage, isLeader);
     streamMetaStartHb(pMeta);
+  } else {
+    tqInfo("vgId:%d update meta stage:%" PRId64 " prev:%" PRId64 " leader:%d", pMeta->vgId, state.term, stage,
+           isLeader);
   }
 }
```
```diff
@@ -236,6 +236,7 @@ static int32_t load_fs(STsdb *pTsdb, const char *fname, TFileSetArray *arr) {
       code = TARRAY2_APPEND(arr, fset);
       TSDB_CHECK_CODE(code, lino, _exit);
     }
+    TARRAY2_SORT(arr, tsdbTFileSetCmprFn);
   } else {
     code = TSDB_CODE_FILE_CORRUPTED;
     TSDB_CHECK_CODE(code, lino, _exit);
```

```diff
@@ -189,6 +189,7 @@ static int32_t tsdbJsonToSttLvl(STsdb *pTsdb, const cJSON *json, SSttLvl **lvl)
     code = TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
     if (code) return code;
   }
+  TARRAY2_SORT(lvl[0]->fobjArr, tsdbTFileObjCmpr);
   return 0;
 }
 
@@ -268,6 +269,7 @@ int32_t tsdbJsonToTFileSet(STsdb *pTsdb, const cJSON *json, STFileSet **fset) {
       code = TARRAY2_APPEND((*fset)->lvlArr, lvl);
       if (code) return code;
     }
+    TARRAY2_SORT((*fset)->lvlArr, tsdbSttLvlCmprFn);
   } else {
     return TSDB_CODE_FILE_CORRUPTED;
   }
```
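Each loader now sorts the array it just rebuilt from the on-disk JSON, so downstream code can rely on a deterministic order regardless of how the entries were serialized. A generic sketch of the same load-then-sort idea using `qsort`; the element type and comparator are illustrative, not the real `TARRAY2` machinery:

```c
#include <stdint.h>
#include <stdlib.h>

typedef struct {
  int64_t fid;  // file-set id used as the ordering key
} FileSet;

static int fsetCmpr(const void *a, const void *b) {
  const FileSet *x = a, *y = b;
  return (x->fid > y->fid) - (x->fid < y->fid);  // avoids int overflow of x - y
}

// Append entries in whatever order the JSON listed them, then sort once so
// later lookups and merges can assume ascending fid.
static void finishLoad(FileSet *arr, size_t n) {
  qsort(arr, n, sizeof(FileSet), fsetCmpr);
}
```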
```diff
@@ -447,6 +447,9 @@ int32_t tsdbMerge(void *arg) {
 _exit:
   if (code) {
     TSDB_ERROR_LOG(TD_VID(tsdb->pVnode), lino, code);
+    tsdbFatal("vgId:%d, failed to merge stt files since %s. code:%d", TD_VID(tsdb->pVnode), terrstr(), code);
+    taosMsleep(100);
+    exit(EXIT_FAILURE);
   } else if (merger->ctx->opened) {
     tsdbDebug("vgId:%d %s done", TD_VID(tsdb->pVnode), __func__);
   }
```

```diff
@@ -4899,6 +4899,7 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
   // alloc
   STsdbReadSnap* pSnap = (STsdbReadSnap*)taosMemoryCalloc(1, sizeof(STsdbReadSnap));
   if (pSnap == NULL) {
+    taosThreadRwlockUnlock(&pTsdb->rwLock);
     code = TSDB_CODE_OUT_OF_MEMORY;
     goto _exit;
   }
@@ -4908,6 +4909,7 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
     pSnap->pMem = pTsdb->mem;
     pSnap->pNode = taosMemoryMalloc(sizeof(*pSnap->pNode));
     if (pSnap->pNode == NULL) {
+      taosThreadRwlockUnlock(&pTsdb->rwLock);
      code = TSDB_CODE_OUT_OF_MEMORY;
      goto _exit;
    }
@@ -4922,6 +4924,7 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
     pSnap->pIMem = pTsdb->imem;
     pSnap->pINode = taosMemoryMalloc(sizeof(*pSnap->pINode));
     if (pSnap->pINode == NULL) {
+      taosThreadRwlockUnlock(&pTsdb->rwLock);
      code = TSDB_CODE_OUT_OF_MEMORY;
      goto _exit;
    }
@@ -4932,6 +4935,9 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
     tsdbRefMemTable(pTsdb->imem, pSnap->pINode);
   }
 
+  // unlock
+  taosThreadRwlockUnlock(&pTsdb->rwLock);
+
   // fs
   code = tsdbFSCreateRefSnapshot(pTsdb->pFS, &pSnap->pfSetArray);
   if (code == TSDB_CODE_SUCCESS) {
@@ -4939,8 +4945,6 @@ int32_t tsdbTakeReadSnap2(STsdbReader* pReader, _query_reseek_func_t reseek, STs
   }
 
 _exit:
-  taosThreadRwlockUnlock(&pTsdb->rwLock);
-
   if (code != TSDB_CODE_SUCCESS) {
     tsdbError("vgId:%d take read snapshot failed, code:%s", TD_VID(pTsdb->pVnode), tstrerror(code));
```
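The `tsdbTakeReadSnap2` change moves the rwlock release off the shared `_exit` label and onto each path that actually holds the lock, so later steps (the fs snapshot) run unlocked and the cleanup path no longer unlocks a lock it may not hold. A condensed sketch of the pattern, with a placeholder allocation standing in for the real snapshot state:

```c
#include <pthread.h>
#include <stdlib.h>

// Each failure path releases the lock before jumping to cleanup, and the
// success path releases it as soon as the protected state has been copied.
static int takeSnapshot(pthread_rwlock_t *lock, void **out) {
  int code = 0;
  pthread_rwlock_rdlock(lock);

  void *snap = calloc(1, 64);     // placeholder for the snapshot struct
  if (snap == NULL) {
    pthread_rwlock_unlock(lock);  // unlock on the error path itself
    code = -1;
    goto _exit;
  }

  /* ... copy the locked state into snap ... */
  pthread_rwlock_unlock(lock);    // done with shared state; later steps run unlocked

  *out = snap;
_exit:
  return code;                    // _exit no longer touches the lock
}
```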
@ -602,7 +602,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
|
||||||
vnodeProcessDropIndexReq(pVnode, ver, pReq, len, pRsp);
|
vnodeProcessDropIndexReq(pVnode, ver, pReq, len, pRsp);
|
||||||
break;
|
break;
|
||||||
case TDMT_VND_STREAM_CHECK_POINT_SOURCE:
|
case TDMT_VND_STREAM_CHECK_POINT_SOURCE:
|
||||||
tqProcessStreamCheckPointSourceReq(pVnode->pTq, pMsg);
|
tqProcessStreamCheckPointSourceReq(pVnode->pTq, pMsg, pRsp);
|
||||||
break;
|
break;
|
||||||
case TDMT_VND_STREAM_TASK_UPDATE:
|
case TDMT_VND_STREAM_TASK_UPDATE:
|
||||||
tqProcessTaskUpdateReq(pVnode->pTq, pMsg);
|
tqProcessTaskUpdateReq(pVnode->pTq, pMsg);
|
||||||
|
@ -754,9 +754,9 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
|
||||||
case TDMT_STREAM_TASK_DISPATCH_RSP:
|
case TDMT_STREAM_TASK_DISPATCH_RSP:
|
||||||
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
|
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
|
||||||
case TDMT_VND_STREAM_TASK_CHECK:
|
case TDMT_VND_STREAM_TASK_CHECK:
|
||||||
return tqProcessStreamTaskCheckReq(pVnode->pTq, pMsg);
|
return tqProcessTaskCheckReq(pVnode->pTq, pMsg);
|
||||||
case TDMT_VND_STREAM_TASK_CHECK_RSP:
|
case TDMT_VND_STREAM_TASK_CHECK_RSP:
|
||||||
return tqProcessStreamTaskCheckRsp(pVnode->pTq, pMsg);
|
return tqProcessTaskCheckRsp(pVnode->pTq, pMsg);
|
||||||
case TDMT_STREAM_RETRIEVE:
|
case TDMT_STREAM_RETRIEVE:
|
||||||
return tqProcessTaskRetrieveReq(pVnode->pTq, pMsg);
|
return tqProcessTaskRetrieveReq(pVnode->pTq, pMsg);
|
||||||
case TDMT_STREAM_RETRIEVE_RSP:
|
case TDMT_STREAM_RETRIEVE_RSP:
|
||||||
|
@ -768,7 +768,7 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
|
||||||
case TDMT_VND_STREAM_SCAN_HISTORY_FINISH_RSP:
|
case TDMT_VND_STREAM_SCAN_HISTORY_FINISH_RSP:
|
||||||
return tqProcessTaskScanHistoryFinishRsp(pVnode->pTq, pMsg);
|
return tqProcessTaskScanHistoryFinishRsp(pVnode->pTq, pMsg);
|
||||||
case TDMT_STREAM_TASK_CHECKPOINT_READY:
|
case TDMT_STREAM_TASK_CHECKPOINT_READY:
|
||||||
return tqProcessStreamTaskCheckpointReadyMsg(pVnode->pTq, pMsg);
|
return tqProcessTaskCheckpointReadyMsg(pVnode->pTq, pMsg);
|
||||||
default:
|
default:
|
||||||
vError("unknown msg type:%d in stream queue", pMsg->msgType);
|
vError("unknown msg type:%d in stream queue", pMsg->msgType);
|
||||||
return TSDB_CODE_APP_ERROR;
|
return TSDB_CODE_APP_ERROR;
|
||||||
|
@@ -1442,11 +1442,8 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in
 
         SColData *pColData = (SColData *)taosArrayGet(pSubmitTbData->aCol, 0);
         TSKEY    *aKey = (TSKEY *)(pColData->pData);
-        vDebug("vgId:%d submit %d rows data, uid:%"PRId64, TD_VID(pVnode), pColData->nVal, pSubmitTbData->uid);
 
         for (int32_t iRow = 0; iRow < pColData->nVal; iRow++) {
-          vDebug("vgId:%d uid:%"PRId64" ts:%"PRId64, TD_VID(pVnode), pSubmitTbData->uid, aKey[iRow]);
-
           if (aKey[iRow] < minKey || aKey[iRow] > maxKey || (iRow > 0 && aKey[iRow] <= aKey[iRow - 1])) {
             code = TSDB_CODE_INVALID_MSG;
             vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(terrno), ver);
@@ -1457,10 +1454,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, in
       } else {
         int32_t nRow = TARRAY_SIZE(pSubmitTbData->aRowP);
         SRow  **aRow = (SRow **)TARRAY_DATA(pSubmitTbData->aRowP);
 
-        vDebug("vgId:%d submit %d rows data, uid:%"PRId64, TD_VID(pVnode), nRow, pSubmitTbData->uid);
         for (int32_t iRow = 0; iRow < nRow; ++iRow) {
-          vDebug("vgId:%d uid:%"PRId64" ts:%"PRId64, TD_VID(pVnode), pSubmitTbData->uid, aRow[iRow]->ts);
-
           if (aRow[iRow]->ts < minKey || aRow[iRow]->ts > maxKey || (iRow > 0 && aRow[iRow]->ts <= aRow[iRow - 1]->ts)) {
             code = TSDB_CODE_INVALID_MSG;
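Both branches above reject a row with the same predicate: outside the `[minKey, maxKey]` keep window, or not strictly increasing within the submission. A standalone restatement of that predicate; `validateSubmitKeys` is an illustrative name, not a TDengine API:

```c
#include <stdbool.h>
#include <stdint.h>

typedef int64_t TSKEY;

// Rows must fall inside [minKey, maxKey] and be strictly increasing.
static bool validateSubmitKeys(const TSKEY *aKey, int32_t nVal, TSKEY minKey, TSKEY maxKey) {
  for (int32_t i = 0; i < nVal; i++) {
    if (aKey[i] < minKey || aKey[i] > maxKey) return false;  // outside keep range
    if (i > 0 && aKey[i] <= aKey[i - 1]) return false;       // not strictly increasing
  }
  return true;
}
```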
@@ -551,13 +551,8 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)
   walApplyVer(pVnode->pWal, commitIdx);
   pVnode->restored = true;
 
-  if (pVnode->pTq->pStreamMeta->taskWillbeLaunched) {
-    vInfo("vgId:%d, sync restore finished, stream tasks will be launched by other thread", vgId);
-    return;
-  }
-
   taosWLockLatch(&pVnode->pTq->pStreamMeta->lock);
-  if (pVnode->pTq->pStreamMeta->taskWillbeLaunched) {
+  if (pVnode->pTq->pStreamMeta->startInfo.startedAfterNodeUpdate) {
     vInfo("vgId:%d, sync restore finished, stream tasks will be launched by other thread", vgId);
     taosWUnLockLatch(&pVnode->pTq->pStreamMeta->lock);
     return;
@@ -612,10 +607,10 @@ static void vnodeBecomeLearner(const SSyncFSM *pFsm) {
 
 static void vnodeBecomeLeader(const SSyncFSM *pFsm) {
   SVnode *pVnode = pFsm->data;
+  vDebug("vgId:%d, become leader", pVnode->config.vgId);
   if (pVnode->pTq) {
     tqUpdateNodeStage(pVnode->pTq, true);
   }
-  vDebug("vgId:%d, become leader", pVnode->config.vgId);
 }
 
 static bool vnodeApplyQueueEmpty(const SSyncFSM *pFsm) {
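The `vnodeRestoreFinish` change drops the unlocked fast-path test of the launch flag and keeps only the check made under the stream-meta latch, which avoids racing with the thread that sets the flag. A minimal sketch of the surviving pattern, with pthread standing in for `taosWLockLatch` and the struct simplified:

```c
#include <pthread.h>
#include <stdbool.h>

typedef struct {
  pthread_rwlock_t lock;
  bool             startedAfterNodeUpdate;
} SMetaLite;

// The flag is read only while the lock is held: no racy unlocked pre-check.
static bool shouldSkipLaunch(SMetaLite *m) {
  pthread_rwlock_wrlock(&m->lock);
  bool skip = m->startedAfterNodeUpdate;
  pthread_rwlock_unlock(&m->lock);
  return skip;
}
```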
@@ -165,7 +165,7 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx*
   }
 
   if (CTG_IS_META_NULL(output->metaType)) {
-    ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(ctx->pName));
+    ctgError("no tbmeta got, tbName:%s", tNameGetTableName(ctx->pName));
     ctgRemoveTbMetaFromCache(pCtg, ctx->pName, false);
     CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
   }
@@ -1171,7 +1171,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
   STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
 
   if (CTG_IS_META_NULL(pOut->metaType)) {
-    ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName));
+    ctgError("no tbmeta got, tbName:%s", tNameGetTableName(pName));
     ctgRemoveTbMetaFromCache(pCtg, pName, false);
     CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
   }
@@ -1341,7 +1341,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
   STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
 
   if (CTG_IS_META_NULL(pOut->metaType)) {
-    ctgTaskError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName));
+    ctgTaskError("no tbmeta got, tbName:%s", tNameGetTableName(pName));
     ctgRemoveTbMetaFromCache(pCtg, pName, false);
     CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
   }
@@ -334,6 +334,7 @@ static int32_t createDataBlockForEmptyInput(SOperatorInfo* pOperator, SSDataBloc
     colInfo.info.type = TSDB_DATA_TYPE_NULL;
     colInfo.info.bytes = 1;
+
 
     SExprInfo* pOneExpr = &pOperator->exprSupp.pExprInfo[i];
     for (int32_t j = 0; j < pOneExpr->base.numOfParams; ++j) {
       SFunctParam* pFuncParam = &pOneExpr->base.pParam[j];
@@ -353,6 +354,10 @@ static int32_t createDataBlockForEmptyInput(SOperatorInfo* pOperator, SSDataBloc
   }
 
   blockDataEnsureCapacity(pBlock, pBlock->info.rows);
+  for (int32_t i = 0; i < blockDataGetNumOfCols(pBlock); ++i) {
+    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
+    colDataSetNULL(pColInfoData, 0);
+  }
   *ppBlock = pBlock;
 
   return TSDB_CODE_SUCCESS;
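`createDataBlockForEmptyInput` now also marks row 0 of every output column as NULL after reserving capacity. Conceptually, `colDataSetNULL` flips the row's bit in the column's null bitmap; a generic illustration with illustrative names, not the actual TDengine layout:

```c
#include <stdint.h>

typedef struct {
  uint8_t *nullBitmap;  // one bit per row, 1 = NULL (layout assumed here)
} ColLite;

// Mark a single row of a column as NULL.
static void colSetNull(ColLite *c, int32_t row) {
  c->nullBitmap[row / 8] |= (uint8_t)(1u << (7 - (row % 8)));
}
```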
@@ -2217,7 +2217,9 @@ FETCH_NEXT_BLOCK:
     if (pSDB) {
       STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
       pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
-      checkUpdateData(pInfo, true, pSDB, false);
+      if (!pInfo->igCheckUpdate && pInfo->pUpdateInfo) {
+        checkUpdateData(pInfo, true, pSDB, false);
+      }
       printSpecDataBlock(pSDB, getStreamOpName(pOperator->operatorType), "update", GET_TASKID(pTaskInfo));
       calBlockTbName(pInfo, pSDB);
       return pSDB;
@@ -1034,7 +1034,7 @@ int32_t doStreamIntervalEncodeOpState(void** buf, int32_t len, SOperatorInfo* pO
   while ((pIte = taosHashIterate(pInfo->pPullDataMap, pIte)) != NULL) {
     void* key = taosHashGetKey(pIte, &keyLen);
     tlen += encodeSWinKey(buf, key);
-    SArray* pArray = (SArray*)pIte;
+    SArray* pArray = *(SArray**)pIte;
     int32_t chSize = taosArrayGetSize(pArray);
     tlen += taosEncodeFixedI32(buf, chSize);
     for (int32_t i = 0; i < chSize; i++) {
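The one-character fix above corrects a classic hash-iterator bug: the map stores an `SArray*` as its value, and the iterator returns a pointer to that stored value, so the value itself is `*(SArray**)pIte`. A plain-C illustration of the difference:

```c
#include <stdio.h>

int main(void) {
  int   payload = 42;
  int  *storedValue = &payload;  // what was put into the hash map as the value
  void *pIte = &storedValue;     // what the iterator hands back: a pointer to the slot

  int *wrong = (int *)pIte;      // old code: treats the slot itself as the value
  int *right = *(int **)pIte;    // new code: dereference the slot first

  printf("right: %d\n", *right); // prints 42; dereferencing `wrong` reads pointer bytes
  (void)wrong;
  return 0;
}
```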
@@ -651,7 +651,7 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int
         (SDataType){.bytes = getApercentileMaxSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY};
   } else {
     // original percent param is reserved
-    if (2 != numOfParams) {
+    if (3 != numOfParams && 2 != numOfParams) {
       return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
     }
     uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
@@ -660,6 +660,19 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int
       return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
     }
 
+    if (3 == numOfParams) {
+      uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type;
+      if (!IS_STR_DATA_TYPE(para3Type)) {
+        return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+      }
+
+      SNode* pParamNode2 = nodesListGetNode(pFunc->pParameterList, 2);
+      if (QUERY_NODE_VALUE != nodeType(pParamNode2) || !validateApercentileAlgo((SValueNode*)pParamNode2)) {
+        return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+                               "Third parameter algorithm of apercentile must be 'default' or 't-digest'");
+      }
+    }
+
     pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
   }
 
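The new third parameter selects the approximation algorithm and must be a string constant. A hedged sketch of what a check like `validateApercentileAlgo` plausibly accepts; the actual implementation may differ in detail:

```c
#include <stdbool.h>
#include <string.h>

// Accept only the two algorithm names mentioned in the error message above.
static bool isValidApercentileAlgo(const char *algo) {
  return strcmp(algo, "default") == 0 || strcmp(algo, "t-digest") == 0;
}
```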
@@ -744,7 +757,11 @@ int32_t topBotCreateMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SN
 }
 
 int32_t apercentileCreateMergeParam(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters) {
-  return reserveFirstMergeParam(pRawParameters, pPartialRes, pParameters);
+  int32_t code = reserveFirstMergeParam(pRawParameters, pPartialRes, pParameters);
+  if (TSDB_CODE_SUCCESS == code && pRawParameters->length >= 3) {
+    code = nodesListStrictAppend(*pParameters, nodesCloneNode(nodesListGetNode(pRawParameters, 2)));
+  }
+  return code;
 }
 
 static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
@@ -1904,7 +1904,7 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
   return TSDB_CODE_SUCCESS;
 }
 
-static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo* pOutput) {
+static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo* pOutput, bool* hasRes) {
   pOutput->percent = pInput->percent;
   pOutput->algo = pInput->algo;
   if (pOutput->algo == APERCT_ALGO_TDIGEST) {
@@ -1915,6 +1915,10 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
       return;
     }
 
+    if (hasRes) {
+      *hasRes = true;
+    }
+
     buildTDigestInfo(pOutput);
     TDigest* pTDigest = pOutput->pTDigest;
     tdigestAutoFill(pTDigest, COMPRESSION);
@@ -1931,6 +1935,10 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
       return;
     }
 
+    if (hasRes) {
+      *hasRes = true;
+    }
+
     buildHistogramInfo(pOutput);
     SHistogramInfo* pHisto = pOutput->pHisto;
 
@@ -1970,12 +1978,13 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) {
 
   qDebug("%s total %" PRId64 " rows will merge, %p", __FUNCTION__, pInput->numOfRows, pInfo->pHisto);
 
+  bool    hasRes = false;
   int32_t start = pInput->startRowIndex;
   for (int32_t i = start; i < start + pInput->numOfRows; ++i) {
     char* data = colDataGetData(pCol, i);
 
     SAPercentileInfo* pInputInfo = (SAPercentileInfo*)varDataVal(data);
-    apercentileTransferInfo(pInputInfo, pInfo);
+    apercentileTransferInfo(pInputInfo, pInfo, &hasRes);
   }
 
   if (pInfo->algo != APERCT_ALGO_TDIGEST) {
@@ -1984,7 +1993,7 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) {
              pInfo->pHisto->numOfEntries, pInfo->pHisto);
   }
 
-  SET_VAL(pResInfo, 1, 1);
+  SET_VAL(pResInfo, hasRes ? 1 : 0, 1);
   return TSDB_CODE_SUCCESS;
 }
 
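The `hasRes` flag threaded through `apercentileTransferInfo` lets the merge step distinguish "no partial input carried data" from a real result, so `SET_VAL` can report zero result rows. A condensed standalone sketch of the pattern, with illustrative types:

```c
#include <stdbool.h>

typedef struct { bool empty; } Partial;

// Returns the number of result rows: 1 only if some partial contributed data.
static int mergePartials(const Partial *in, int n) {
  bool hasRes = false;
  for (int i = 0; i < n; i++) {
    if (!in[i].empty) hasRes = true;  // mirrors apercentileTransferInfo(..., &hasRes)
  }
  return hasRes ? 1 : 0;              // mirrors SET_VAL(pResInfo, hasRes ? 1 : 0, 1)
}
```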
@@ -2056,7 +2065,7 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
 
   qDebug("%s start to combine apercentile, %p", __FUNCTION__, pDBuf->pHisto);
 
-  apercentileTransferInfo(pSBuf, pDBuf);
+  apercentileTransferInfo(pSBuf, pDBuf, NULL);
   pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
   pDResInfo->isNullRes &= pSResInfo->isNullRes;
   return TSDB_CODE_SUCCESS;
@@ -88,6 +88,7 @@ static EDealRes doRewriteExpr(SNode** pNode, void* pContext) {
           pCxt->pOutputs[index] = true;
           break;
         }
+        index++;
       }
     }
     break;
@@ -174,6 +175,7 @@ static int32_t cloneRewriteExprs(SNodeList* pExprs, bool* pOutputs, SNodeList**
       break;
     }
+    index++;
   }
   return code;
 }
@@ -3983,18 +3983,15 @@ _return:
   return code;
 }
 
-static int32_t fltSclGetDatumValueFromPoint(SFltSclPoint *point, SFltSclDatum *d) {
+static int32_t fltSclGetTimeStampDatum(SFltSclPoint *point, SFltSclDatum *d) {
   *d = point->val;
-  if (point->val.kind == FLT_SCL_DATUM_KIND_NULL) {
-    return TSDB_CODE_SUCCESS;
-  }
-  if (point->val.kind == FLT_SCL_DATUM_KIND_MAX) {
-    getDataMax(d->type.type, &(d->i));
-  } else if (point->val.kind == FLT_SCL_DATUM_KIND_MIN) {
-    getDataMin(d->type.type, &(d->i));
-  }
+  d->kind = FLT_SCL_DATUM_KIND_INT64;
 
-  if (IS_INTEGER_TYPE(d->type.type) || IS_TIMESTAMP_TYPE(d->type.type)) {
+  if (point->val.kind == FLT_SCL_DATUM_KIND_MAX) {
+    getDataMax(point->val.type.type, &(d->i));
+  } else if (point->val.kind == FLT_SCL_DATUM_KIND_MIN) {
+    getDataMin(point->val.type.type, &(d->i));
+  } else if (point->val.kind == FLT_SCL_DATUM_KIND_INT64) {
     if (point->excl) {
       if (point->start) {
         ++d->i;
@@ -4002,6 +3999,28 @@ static int32_t fltSclGetDatumValueFromPoint(SFltSclPoint *point, SFltSclDatum *d
         --d->i;
       }
     }
+  } else if (point->val.kind == FLT_SCL_DATUM_KIND_FLOAT64) {
+    double v = d->d;
+    if (point->excl) {
+      if (point->start) {
+        d->i = v + 1;
+      } else {
+        d->i = v - 1;
+      }
+    } else {
+      d->i = v;
+    }
+  } else if (point->val.kind == FLT_SCL_DATUM_KIND_UINT64) {
+    uint64_t v = d->u;
+    if (point->excl) {
+      if (point->start) {
+        d->i = v + 1;
+      } else {
+        d->i = v - 1;
+      }
+    } else {
+      d->i = v;
+    }
   } else {
     qError("not supported type %d when get datum from point", d->type.type);
   }
@@ -4022,12 +4041,13 @@ int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) {
     SFltSclColumnRange *colRange = taosArrayGet(colRanges, 0);
     SArray             *points = colRange->points;
     if (taosArrayGetSize(points) == 2) {
+      *win = TSWINDOW_DESC_INITIALIZER;
      SFltSclPoint *startPt = taosArrayGet(points, 0);
       SFltSclPoint *endPt = taosArrayGet(points, 1);
       SFltSclDatum start;
       SFltSclDatum end;
-      fltSclGetDatumValueFromPoint(startPt, &start);
-      fltSclGetDatumValueFromPoint(endPt, &end);
+      fltSclGetTimeStampDatum(startPt, &start);
+      fltSclGetTimeStampDatum(endPt, &end);
       win->skey = start.i;
       win->ekey = end.i;
       *isStrict = true;
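`fltSclGetTimeStampDatum` now coerces MIN/MAX markers, float and unsigned datums into an int64 timestamp, and narrows exclusive endpoints by one tick. The endpoint rule in isolation, as an illustrative helper rather than the TDengine API:

```c
#include <stdbool.h>
#include <stdint.h>

// An open-interval endpoint over integer timestamps is narrowed by one tick:
// an exclusive start becomes v + 1, an exclusive end becomes v - 1.
static int64_t toClosedBound(int64_t v, bool excl, bool isStart) {
  if (!excl) return v;
  return isStart ? v + 1 : v - 1;
}
```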
@@ -26,8 +26,28 @@
 extern "C" {
 #endif
 
-#define ONE_MB_F       (1048576.0)
-#define SIZE_IN_MB(_v) ((_v) / ONE_MB_F)
+#define CHECK_DOWNSTREAM_INTERVAL      100
+#define LAUNCH_HTASK_INTERVAL          100
+#define WAIT_FOR_MINIMAL_INTERVAL      100.00
+#define MAX_RETRY_LAUNCH_HISTORY_TASK  40
+#define RETRY_LAUNCH_INTERVAL_INC_RATE 1.2
+
+#define MAX_BLOCK_NAME_NUM         1024
+#define DISPATCH_RETRY_INTERVAL_MS 300
+#define MAX_CONTINUE_RETRY_COUNT   5
+
+#define META_HB_CHECK_INTERVAL    200
+#define META_HB_SEND_IDLE_COUNTER 25  // send hb every 5 sec
+#define STREAM_TASK_KEY_LEN       ((sizeof(int64_t)) << 1)
+
+// clang-format off
+#define stFatal(...) do { if (stDebugFlag & DEBUG_FATAL) { taosPrintLog("STM FATAL ", DEBUG_FATAL, 255, __VA_ARGS__); }} while(0)
+#define stError(...) do { if (stDebugFlag & DEBUG_ERROR) { taosPrintLog("STM ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
+#define stWarn(...)  do { if (stDebugFlag & DEBUG_WARN)  { taosPrintLog("STM WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
+#define stInfo(...)  do { if (stDebugFlag & DEBUG_INFO)  { taosPrintLog("STM ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
+#define stDebug(...) do { if (stDebugFlag & DEBUG_DEBUG) { taosPrintLog("STM ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0)
+#define stTrace(...) do { if (stDebugFlag & DEBUG_TRACE) { taosPrintLog("STM ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0)
+// clang-format on
 
 typedef struct SStreamGlobalEnv {
   int8_t inited;
@@ -41,19 +61,23 @@ typedef struct SStreamContinueExecInfo {
 } SStreamContinueExecInfo;
 
 struct STokenBucket {
-  int32_t capacity;      // total capacity
-  int64_t fillTimestamp; // fill timestamp
+  int32_t numCapacity;   // total capacity, available token per second
   int32_t numOfToken;    // total available tokens
-  int32_t rate;          // number of token per second
+  int32_t numRate;       // number of token per second
+  double  bytesCapacity; // available capacity for maximum input size, KiloBytes per Second
+  double  bytesRemain;   // not consumed bytes per second
+  double  bytesRate;     // number of token per second
+  int64_t fillTimestamp; // fill timestamp
 };
 
 extern SStreamGlobalEnv streamEnv;
 extern int32_t          streamBackendId;
 extern int32_t          streamBackendCfWrapperId;
 
-const char* streamGetBlockTypeStr(int32_t type);
-void streamRetryDispatchStreamBlock(SStreamTask* pTask, int64_t waitDuration);
+void streamRetryDispatchData(SStreamTask* pTask, int64_t waitDuration);
 int32_t streamDispatchStreamBlock(SStreamTask* pTask);
+void destroyDispatchMsg(SStreamDispatchReq* pReq, int32_t numOfVgroups);
+int32_t getNumOfDispatchBranch(SStreamTask* pTask);
 
 int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBlock);
 SStreamDataBlock* createStreamBlockFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg);
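The reworked `STokenBucket` throttles on two axes at once: a token count per second and a byte budget per second. A hedged sketch of a refill-and-consume cycle consistent with these fields; the exact refill policy shown here is an assumption, not TDengine's code:

```c
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  int32_t numCapacity, numOfToken, numRate;
  double  bytesCapacity, bytesRemain, bytesRate;
  int64_t fillTimestamp;  // ms
} TokenBucketLite;

// Refill both budgets once per elapsed second, capped at their capacities.
static void refill(TokenBucketLite *b, int64_t nowMs) {
  int64_t elapsedSec = (nowMs - b->fillTimestamp) / 1000;
  if (elapsedSec <= 0) return;
  b->numOfToken += (int32_t)(elapsedSec * b->numRate);
  if (b->numOfToken > b->numCapacity) b->numOfToken = b->numCapacity;
  b->bytesRemain += (double)elapsedSec * b->bytesRate;
  if (b->bytesRemain > b->bytesCapacity) b->bytesRemain = b->bytesCapacity;
  b->fillTimestamp = nowMs;
}

// Consume one token and `kb` kilobytes; fail if either budget is exhausted.
static bool tryConsume(TokenBucketLite *b, double kb, int64_t nowMs) {
  refill(b, nowMs);
  if (b->numOfToken <= 0 || b->bytesRemain < kb) return false;
  b->numOfToken -= 1;
  b->bytesRemain -= kb;
  return true;
}
```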
@@ -68,14 +92,19 @@ int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* p
 
 int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId);
 int32_t streamTaskBuildCheckpoint(SStreamTask* pTask);
-int32_t streamDispatchCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet);
+int32_t streamSendCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet);
 
 int32_t streamAddCheckpointReadyMsg(SStreamTask* pTask, int32_t srcTaskId, int32_t index, int64_t checkpointId);
 int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask);
 int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask);
 int32_t streamTaskGetNumOfDownstream(const SStreamTask* pTask);
 
-int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks);
+int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks, int32_t* blockSize);
+int32_t streamQueueGetNumOfItemsInQueue(const SStreamQueue* pQueue);
+int32_t streamQueueItemGetSize(const SStreamQueueItem* pItem);
+void streamQueueItemIncSize(const SStreamQueueItem* pItem, int32_t size);
+const char* streamQueueItemGetTypeStr(int32_t type);
+
 SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem);
 
 int32_t streamTaskBuildScanhistoryRspMsg(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, void** pBuffer, int32_t* pLen);
@@ -84,7 +113,10 @@ int32_t streamNotifyUpstreamContinue(SStreamTask* pTask);
 int32_t streamTaskFillHistoryFinished(SStreamTask* pTask);
 int32_t streamTransferStateToStreamTask(SStreamTask* pTask);
 
-int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t cap, int32_t rate);
+int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t numCap, int32_t numRate, int32_t bytesRate);
+STaskId streamTaskExtractKey(const SStreamTask* pTask);
+void streamTaskInitForLaunchHTask(SHistoryTaskInfo* pInfo);
+void streamTaskSetRetryInfoForLaunch(SHistoryTaskInfo* pInfo);
 
 SStreamQueue* streamQueueOpen(int64_t cap);
 void streamQueueClose(SStreamQueue* pQueue, int32_t taskId);
@@ -93,7 +125,7 @@ void streamQueueProcessFail(SStreamQueue* queue);
 void* streamQueueNextItem(SStreamQueue* pQueue);
 void streamFreeQitem(SStreamQueueItem* data);
 
-STaskId extractStreamTaskKey(const SStreamTask* pTask);
+
 
 #ifdef __cplusplus
 }
@@ -60,10 +60,10 @@ static void streamSchedByTimer(void* param, void* tmrId) {
   SStreamTask* pTask = (void*)param;
 
   int8_t status = atomic_load_8(&pTask->schedInfo.status);
-  qDebug("s-task:%s in scheduler, trigger status:%d, next:%dms", pTask->id.idStr, status, (int32_t)pTask->info.triggerParam);
+  stDebug("s-task:%s in scheduler, trigger status:%d, next:%dms", pTask->id.idStr, status, (int32_t)pTask->info.triggerParam);
 
   if (streamTaskShouldStop(&pTask->status) || streamTaskShouldPause(&pTask->status)) {
-    qDebug("s-task:%s jump out of schedTimer", pTask->id.idStr);
+    stDebug("s-task:%s jump out of schedTimer", pTask->id.idStr);
     return;
   }
 
@@ -98,7 +98,7 @@ int32_t streamSetupScheduleTrigger(SStreamTask* pTask) {
     int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
     ASSERT(ref == 2 && pTask->schedInfo.pTimer == NULL);
 
-    qDebug("s-task:%s setup scheduler trigger, delay:%" PRId64 " ms", pTask->id.idStr, pTask->info.triggerParam);
+    stDebug("s-task:%s setup scheduler trigger, delay:%" PRId64 " ms", pTask->id.idStr, pTask->info.triggerParam);
 
     pTask->schedInfo.pTimer = taosTmrStart(streamSchedByTimer, (int32_t)pTask->info.triggerParam, pTask, streamEnv.timer);
     pTask->schedInfo.status = TASK_TRIGGER_STATUS__INACTIVE;
@@ -114,7 +114,7 @@ int32_t streamSchedExec(SStreamTask* pTask) {
     if (pRunReq == NULL) {
       terrno = TSDB_CODE_OUT_OF_MEMORY;
      /*int8_t status = */streamTaskSetSchedStatusInActive(pTask);
-      qError("failed to create msg to aunch s-task:%s, reason out of memory", pTask->id.idStr);
+      stError("failed to create msg to aunch s-task:%s, reason out of memory", pTask->id.idStr);
       return -1;
     }
 
@@ -122,12 +122,12 @@ int32_t streamSchedExec(SStreamTask* pTask) {
     pRunReq->streamId = pTask->id.streamId;
     pRunReq->taskId = pTask->id.taskId;
 
-    qDebug("trigger to run s-task:%s", pTask->id.idStr);
+    stDebug("trigger to run s-task:%s", pTask->id.idStr);
 
     SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
     tmsgPutToQueue(pTask->pMsgCb, STREAM_QUEUE, &msg);
   } else {
-    qDebug("s-task:%s not launch task since sched status:%d", pTask->id.idStr, pTask->status.schedStatus);
+    stDebug("s-task:%s not launch task since sched status:%d", pTask->id.idStr, pTask->status.schedStatus);
   }
 
   return 0;
@@ -140,8 +140,12 @@ static int32_t buildDispatchRsp(const SStreamTask* pTask, const SStreamDispatchR
   }
 
   ((SMsgHead*)(*pBuf))->vgId = htonl(pReq->upstreamNodeId);
+  ASSERT(((SMsgHead*)(*pBuf))->vgId != 0);
+
   SStreamDispatchRsp* pDispatchRsp = POINTER_SHIFT((*pBuf), sizeof(SMsgHead));
 
+  pDispatchRsp->stage = htobe64(pReq->stage);
+  pDispatchRsp->msgId = htonl(pReq->msgId);
   pDispatchRsp->inputStatus = status;
   pDispatchRsp->streamId = htobe64(pReq->streamId);
   pDispatchRsp->upstreamNodeId = htonl(pReq->upstreamNodeId);
@@ -159,7 +163,7 @@ static int32_t streamTaskAppendInputBlocks(SStreamTask* pTask, const SStreamDisp
   if (pBlock == NULL) {
     streamTaskInputFail(pTask);
     status = TASK_INPUT_STATUS__FAILED;
-    qError("vgId:%d, s-task:%s failed to receive dispatch msg, reason: out of memory", pTask->pMeta->vgId,
+    stError("vgId:%d, s-task:%s failed to receive dispatch msg, reason: out of memory", pTask->pMeta->vgId,
            pTask->id.idStr);
   } else {
     if (pBlock->type == STREAM_INPUT__TRANS_STATE) {
@@ -180,7 +184,7 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq,
 
   // enqueue
   if (pData != NULL) {
-    qDebug("s-task:%s (child %d) recv retrieve req from task:0x%x(vgId:%d), reqId:0x%" PRIx64, pTask->id.idStr,
+    stDebug("s-task:%s (child %d) recv retrieve req from task:0x%x(vgId:%d), reqId:0x%" PRIx64, pTask->id.idStr,
            pTask->info.selfChildId, pReq->srcTaskId, pReq->srcNodeId, pReq->reqId);
 
     pData->type = STREAM_INPUT__DATA_RETRIEVE;
@@ -210,42 +214,54 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq,
   return status == TASK_INPUT_STATUS__NORMAL ? 0 : -1;
 }
 
-int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) {
-  qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr,
-         pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen);
+int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp) {
   int32_t status = 0;
+  const char* id = pTask->id.idStr;
+
+  stDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64 ", msgId:%d", id,
+          pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen, pReq->msgId);
+
   SStreamChildEpInfo* pInfo = streamTaskGetUpstreamTaskEpInfo(pTask, pReq->upstreamTaskId);
   ASSERT(pInfo != NULL);
 
-  // upstream task has restarted/leader-follower switch/transferred to other dnodes
+  if (pTask->pMeta->role == NODE_ROLE_FOLLOWER) {
+    stError("s-task:%s task on follower received dispatch msgs, dispatch msg rejected", id);
+    status = TASK_INPUT_STATUS__REFUSED;
+  } else {
     if (pReq->stage > pInfo->stage) {
-      qError("s-task:%s upstream task:0x%x (vgId:%d) has restart/leader-switch/vnode-transfer, prev stage:%" PRId64
+      // upstream task has restarted/leader-follower switch/transferred to other dnodes
+      stError("s-task:%s upstream task:0x%x (vgId:%d) has restart/leader-switch/vnode-transfer, prev stage:%" PRId64
              ", current:%" PRId64 " dispatch msg rejected",
-             pTask->id.idStr, pReq->upstreamTaskId, pReq->upstreamNodeId, pInfo->stage, pReq->stage);
-      status = TASK_INPUT_STATUS__BLOCKED;
+             id, pReq->upstreamTaskId, pReq->upstreamNodeId, pInfo->stage, pReq->stage);
+      status = TASK_INPUT_STATUS__REFUSED;
     } else {
       if (!pInfo->dataAllowed) {
-        qWarn("s-task:%s data from task:0x%x is denied, since inputQ is closed for it", pTask->id.idStr,
-              pReq->upstreamTaskId);
+        stWarn("s-task:%s data from task:0x%x is denied, since inputQ is closed for it", id, pReq->upstreamTaskId);
         status = TASK_INPUT_STATUS__BLOCKED;
       } else {
-        // Current task has received the checkpoint req from the upstream task, from which the message should all be
+        // This task has received the checkpoint req from the upstream task, from which all the messages should be
         // blocked
         if (pReq->type == STREAM_INPUT__CHECKPOINT_TRIGGER) {
           streamTaskCloseUpstreamInput(pTask, pReq->upstreamTaskId);
-          qDebug("s-task:%s close inputQ for upstream:0x%x", pTask->id.idStr, pReq->upstreamTaskId);
+          stDebug("s-task:%s close inputQ for upstream:0x%x, msgId:%d", id, pReq->upstreamTaskId, pReq->msgId);
         }
 
         status = streamTaskAppendInputBlocks(pTask, pReq);
       }
     }
+  }
+
+  // disable the data from upstream tasks
+  int8_t st = pTask->status.taskStatus;
+  if (st == TASK_STATUS__HALT) {
+    status = TASK_INPUT_STATUS__BLOCKED;
+  }
 
   {
     // do send response with the input status
     int32_t code = buildDispatchRsp(pTask, pReq, status, &pRsp->pCont);
     if (code != TSDB_CODE_SUCCESS) {
-      // todo handle failure
+      stError("s-task:%s failed to build dispatch rsp, msgId:%d, code:%s", id, pReq->msgId, tstrerror(code));
       return code;
     }
@@ -254,11 +270,8 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
   }
 
   tDeleteStreamDispatchReq(pReq);
+  streamSchedExec(pTask);
 
-  int8_t schedStatus = streamTaskSetSchedStatusWait(pTask);
-  if (schedStatus == TASK_SCHED_STATUS__INACTIVE) {
-    streamTryExec(pTask);
-  }
   return 0;
 }
 
@@ -306,6 +319,6 @@ SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t
     }
   }
 
-  qError("s-task:%s failed to find upstream task:0x%x", pTask->id.idStr, taskId);
+  stError("s-task:%s failed to find upstream task:0x%x", pTask->id.idStr, taskId);
   return NULL;
 }
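The rewritten `streamProcessDispatchMsg` decides an input status before building the response: refused for a follower node or a stale upstream stage, blocked for a closed inputQ or a halted task, otherwise the blocks are appended. The decision tree in isolation, with illustrative enum and helper names:

```c
#include <stdbool.h>

typedef enum { INPUT_NORMAL, INPUT_BLOCKED, INPUT_REFUSED } EInputStatus;

// Condensed sketch; the real code also closes the inputQ on a checkpoint
// trigger before appending the dispatched blocks.
static EInputStatus classifyDispatch(bool onFollower, bool staleStage, bool dataAllowed, bool halted) {
  if (onFollower || staleStage) return INPUT_REFUSED;  // wrong role, or restarted upstream
  if (!dataAllowed) return INPUT_BLOCKED;              // inputQ closed for this upstream
  if (halted) return INPUT_BLOCKED;                    // halted task blocks all upstream data
  return INPUT_NORMAL;
}
```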
@@ -303,7 +303,7 @@ int32_t bkdMgtDumpTo(SBackendManager* bm, char* dname) {
   sprintf(dstDir, "%s%s%s", bm->path, TD_DIRSEP, dname);
 
   if (!taosDirExist(srcDir)) {
-    qError("failed to dump srcDir %s, reason: not exist such dir", srcDir);
+    stError("failed to dump srcDir %s, reason: not exist such dir", srcDir);
     code = -1;
     goto _ERROR;
   }
@@ -311,7 +311,7 @@ int32_t bkdMgtDumpTo(SBackendManager* bm, char* dname) {
   code = taosMkDir(dstDir);
   if (code != 0) {
     terrno = TAOS_SYSTEM_ERROR(errno);
-    qError("failed to mkdir srcDir %s, reason: %s", dstDir, terrstr());
+    stError("failed to mkdir srcDir %s, reason: %s", dstDir, terrstr());
     goto _ERROR;
   }
 
@@ -452,13 +452,13 @@ int32_t rebuildDirFromCheckpoint(const char* path, int64_t chkpId, char** dst) {
     taosMkDir(state);
     code = copyFiles(chkp, state);
     if (code != 0) {
-      qError("failed to restart stream backend from %s, reason: %s", chkp, tstrerror(TAOS_SYSTEM_ERROR(errno)));
+      stError("failed to restart stream backend from %s, reason: %s", chkp, tstrerror(TAOS_SYSTEM_ERROR(errno)));
     } else {
-      qInfo("start to restart stream backend at checkpoint path: %s", chkp);
+      stInfo("start to restart stream backend at checkpoint path: %s", chkp);
     }
 
   } else {
-    qError("failed to start stream backend at %s, reason: %s, restart from default state dir:%s", chkp,
+    stError("failed to start stream backend at %s, reason: %s, restart from default state dir:%s", chkp,
            tstrerror(TAOS_SYSTEM_ERROR(errno)), state);
     taosMkDir(state);
   }
@@ -473,7 +473,7 @@ void* streamBackendInit(const char* streamPath, int64_t chkpId) {
   char*   backendPath = NULL;
   int32_t code = rebuildDirFromCheckpoint(streamPath, chkpId, &backendPath);
 
-  qDebug("start to init stream backend at %s, checkpointid: %" PRId64 "", backendPath, chkpId);
+  stDebug("start to init stream backend at %s, checkpointid: %" PRId64 "", backendPath, chkpId);
 
   uint32_t         dbMemLimit = nextPow2(tsMaxStreamBackendCache) << 20;
   SBackendWrapper* pHandle = taosMemoryCalloc(1, sizeof(SBackendWrapper));
@@ -517,7 +517,7 @@ void* streamBackendInit(const char* streamPath, int64_t chkpId) {
     taosMemoryFreeClear(err);
     pHandle->db = rocksdb_open(opts, backendPath, &err);
     if (err != NULL) {
-      qError("failed to open rocksdb, path:%s, reason:%s", backendPath, err);
+      stError("failed to open rocksdb, path:%s, reason:%s", backendPath, err);
       taosMemoryFreeClear(err);
       goto _EXIT;
     }
@@ -534,7 +534,7 @@ void* streamBackendInit(const char* streamPath, int64_t chkpId) {
   if (cfs != NULL) {
     rocksdb_list_column_families_destroy(cfs, nCf);
   }
-  qDebug("succ to init stream backend at %s, backend:%p", backendPath, pHandle);
+  stDebug("succ to init stream backend at %s, backend:%p", backendPath, pHandle);
   taosMemoryFreeClear(backendPath);
 
   return (void*)pHandle;
@@ -547,7 +547,7 @@ _EXIT:
   taosHashCleanup(pHandle->cfInst);
   tdListFree(pHandle->list);
   taosMemoryFree(pHandle);
-  qDebug("failed to init stream backend at %s", backendPath);
+  stDebug("failed to init stream backend at %s", backendPath);
   taosMemoryFree(backendPath);
   return NULL;
 }
@@ -581,7 +581,7 @@ void streamBackendCleanup(void* arg) {
   taosThreadMutexDestroy(&pHandle->mutex);
 
   taosThreadMutexDestroy(&pHandle->cfMutex);
-  qDebug("destroy stream backend :%p", pHandle);
+  stDebug("destroy stream backend :%p", pHandle);
   taosMemoryFree(pHandle);
   return;
 }
@@ -590,7 +590,7 @@ void streamBackendHandleCleanup(void* arg) {
   bool remove = wrapper->remove;
   taosThreadRwlockWrlock(&wrapper->rwLock);
 
-  qDebug("start to do-close backendwrapper %p, %s", wrapper, wrapper->idstr);
+  stDebug("start to do-close backendwrapper %p, %s", wrapper, wrapper->idstr);
   if (wrapper->rocksdb == NULL) {
     taosThreadRwlockUnlock(&wrapper->rwLock);
     return;
@@ -603,7 +603,7 @@ void streamBackendHandleCleanup(void* arg) {
     for (int i = 0; i < cfLen; i++) {
       if (wrapper->pHandle[i] != NULL) rocksdb_drop_column_family(wrapper->rocksdb, wrapper->pHandle[i], &err);
       if (err != NULL) {
-        qError("failed to drop cf:%s_%s, reason:%s", wrapper->idstr, ginitDict[i].key, err);
+        stError("failed to drop cf:%s_%s, reason:%s", wrapper->idstr, ginitDict[i].key, err);
         taosMemoryFreeClear(err);
       }
     }
@@ -614,7 +614,7 @@ void streamBackendHandleCleanup(void* arg) {
     for (int i = 0; i < cfLen; i++) {
      if (wrapper->pHandle[i] != NULL) rocksdb_flush_cf(wrapper->rocksdb, flushOpt, wrapper->pHandle[i], &err);
       if (err != NULL) {
-        qError("failed to flush cf:%s_%s, reason:%s", wrapper->idstr, ginitDict[i].key, err);
+        stError("failed to flush cf:%s_%s, reason:%s", wrapper->idstr, ginitDict[i].key, err);
         taosMemoryFreeClear(err);
       }
     }
@@ -649,7 +649,7 @@ void streamBackendHandleCleanup(void* arg) {
   wrapper->rocksdb = NULL;
   taosReleaseRef(streamBackendId, wrapper->backendId);
 
-  qDebug("end to do-close backendwrapper %p, %s", wrapper, wrapper->idstr);
+  stDebug("end to do-close backendwrapper %p, %s", wrapper, wrapper->idstr);
   taosMemoryFree(wrapper);
   return;
 }
@@ -794,7 +794,10 @@ int32_t chkpGetAllDbCfHandle(SStreamMeta* pMeta, rocksdb_column_family_handle_t*
     int64_t id = *(int64_t*)pIter;
 
     SBackendCfWrapper* wrapper = taosAcquireRef(streamBackendCfWrapperId, id);
-    if (wrapper == NULL) continue;
+    if (wrapper == NULL) {
+      pIter = taosHashIterate(pMeta->pTaskBackendUnique, pIter);
+      continue;
+    }
 
     taosThreadRwlockRdlock(&wrapper->rwLock);
     for (int i = 0; i < sizeof(ginitDict) / sizeof(ginitDict[0]); i++) {
@@ -810,6 +813,10 @@ int32_t chkpGetAllDbCfHandle(SStreamMeta* pMeta, rocksdb_column_family_handle_t*
   }
 
   int32_t nCf = taosArrayGetSize(pHandle);
+  if (nCf == 0) {
+    taosArrayDestroy(pHandle);
+    return nCf;
+  }
+
   rocksdb_column_family_handle_t** ppCf = taosMemoryCalloc(nCf, sizeof(rocksdb_column_family_handle_t*));
   for (int i = 0; i < nCf; i++) {
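The guard added in `chkpGetAllDbCfHandle` fixes a spin: with a manually advanced `taosHashIterate` cursor, a bare `continue` never moves the cursor. The corrected loop shape, as a fragment rather than a full program; `pHash` and `usable` are placeholders:

```c
// Fragment: taosHashIterate is TDengine's hash-iterator API, usable() stands
// in for the per-entry validity test (the acquire-ref check in the diff).
void *pIter = taosHashIterate(pHash, NULL);
while (pIter != NULL) {
  if (!usable(pIter)) {
    pIter = taosHashIterate(pHash, pIter);  // advance the cursor, then skip
    continue;
  }
  /* ... process the entry ... */
  pIter = taosHashIterate(pHash, pIter);
}
```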
@@ -825,14 +832,14 @@ int32_t chkpDoDbCheckpoint(rocksdb_t* db, char* path) {
   char*                 err = NULL;
   rocksdb_checkpoint_t* cp = rocksdb_checkpoint_object_create(db, &err);
   if (cp == NULL || err != NULL) {
-    qError("failed to do checkpoint at:%s, reason:%s", path, err);
+    stError("failed to do checkpoint at:%s, reason:%s", path, err);
     taosMemoryFreeClear(err);
     goto _ERROR;
   }
 
   rocksdb_checkpoint_create(cp, path, 64 << 20, &err);
   if (err != NULL) {
-    qError("failed to do checkpoint at:%s, reason:%s", path, err);
+    stError("failed to do checkpoint at:%s, reason:%s", path, err);
     taosMemoryFreeClear(err);
   } else {
     code = 0;
@@ -842,6 +849,9 @@ _ERROR:
   return code;
 }
 int32_t chkpPreFlushDb(rocksdb_t* db, rocksdb_column_family_handle_t** cf, int32_t nCf) {
+  if (nCf == 0) {
+    return 0;
+  }
   int   code = 0;
   char* err = NULL;
 
@@ -850,7 +860,7 @@ int32_t chkpPreFlushDb(rocksdb_t* db, rocksdb_column_family_handle_t** cf, int32
 
   rocksdb_flush_cfs(db, flushOpt, cf, nCf, &err);
   if (err != NULL) {
-    qError("failed to flush db before streamBackend clean up, reason:%s", err);
+    stError("failed to flush db before streamBackend clean up, reason:%s", err);
     taosMemoryFree(err);
     code = -1;
   }
@@ -865,7 +875,7 @@ int32_t chkpPreCheckDir(char* path, int64_t chkpId, char** chkpDir, char** chkpI
   sprintf(pChkpDir, "%s%s%s", path, TD_DIRSEP, "checkpoints");
   code = taosMulModeMkDir(pChkpDir, 0755, true);
   if (code != 0) {
-    qError("failed to prepare checkpoint dir, path:%s, reason:%s", path, tstrerror(code));
+    stError("failed to prepare checkpoint dir, path:%s, reason:%s", path, tstrerror(code));
     taosMemoryFree(pChkpDir);
     taosMemoryFree(pChkpIdDir);
     code = -1;
@@ -874,7 +884,7 @@ int32_t chkpPreCheckDir(char* path, int64_t chkpId, char** chkpDir, char** chkpI
 
   sprintf(pChkpIdDir, "%s%scheckpoint%" PRId64, pChkpDir, TD_DIRSEP, chkpId);
   if (taosIsDir(pChkpIdDir)) {
-    qInfo("stream rm exist checkpoint%s", pChkpIdDir);
+    stInfo("stream rm exist checkpoint%s", pChkpIdDir);
     taosRemoveFile(pChkpIdDir);
   }
   *chkpDir = pChkpDir;
@@ -898,19 +908,19 @@ int32_t streamBackendTriggerChkp(void* arg, char* dst) {
     goto _ERROR;
   }
   int32_t nCf = chkpGetAllDbCfHandle(pMeta, &ppCf, refs);
-  qDebug("stream backend:%p start to do checkpoint at:%s, cf num: %d ", pHandle, dst, nCf);
+  stDebug("stream backend:%p start to do checkpoint at:%s, cf num: %d ", pHandle, dst, nCf);
 
   code = chkpPreFlushDb(pHandle->db, ppCf, nCf);
   if (code == 0) {
     code = chkpDoDbCheckpoint(pHandle->db, dst);
     if (code != 0) {
-      qError("stream backend:%p failed to do checkpoint at:%s", pHandle, dst);
+      stError("stream backend:%p failed to do checkpoint at:%s", pHandle, dst);
     } else {
-      qDebug("stream backend:%p end to do checkpoint at:%s, time cost:%" PRId64 "ms", pHandle, dst,
+      stDebug("stream backend:%p end to do checkpoint at:%s, time cost:%" PRId64 "ms", pHandle, dst,
              taosGetTimestampMs() - st);
     }
   } else {
-    qError("stream backend:%p failed to flush db at:%s", pHandle, dst);
+    stError("stream backend:%p failed to flush db at:%s", pHandle, dst);
   }
 
   // release all ref to cfWrapper;
@@ -967,24 +977,25 @@ int32_t streamBackendDoCheckpoint(void* arg, uint64_t checkpointId) {
 
   SBackendWrapper* pHandle = taosAcquireRef(streamBackendId, backendRid);
   if (pHandle == NULL || pHandle->db == NULL) {
+    stError("failed to acquire state-backend handle");
     goto _ERROR;
   }
 
   // Get all cf and acquire cfWrappter
   int32_t nCf = chkpGetAllDbCfHandle(pMeta, &ppCf, refs);
-  qDebug("stream backend:%p start to do checkpoint at:%s, cf num: %d ", pHandle, pChkpIdDir, nCf);
+  stDebug("stream backend:%p start to do checkpoint at:%s, cf num: %d ", pHandle, pChkpIdDir, nCf);
 
   code = chkpPreFlushDb(pHandle->db, ppCf, nCf);
-  if (code == 0) {
+  if (code == 0 && nCf != 0) {
     code = chkpDoDbCheckpoint(pHandle->db, pChkpIdDir);
     if (code != 0) {
-      qError("stream backend:%p failed to do checkpoint at:%s", pHandle, pChkpIdDir);
+      stError("stream backend:%p failed to do checkpoint at:%s", pHandle, pChkpIdDir);
     } else {
-      qDebug("stream backend:%p end to do checkpoint at:%s, time cost:%" PRId64 "ms", pHandle, pChkpIdDir,
+      stDebug("stream backend:%p end to do checkpoint at:%s, time cost:%" PRId64 "ms", pHandle, pChkpIdDir,
              taosGetTimestampMs() - st);
     }
-  } else {
-    qError("stream backend:%p failed to flush db at:%s", pHandle, pChkpIdDir);
+  } else if (nCf != 0) {
+    stError("stream backend:%p failed to flush db at:%s", pHandle, pChkpIdDir);
   }
   // release all ref to cfWrapper;
   for (int i = 0; i < taosArrayGetSize(refs); i++) {
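`chkpDoDbCheckpoint` drives RocksDB's C checkpoint API. A minimal standalone illustration of the same call sequence, with error handling abbreviated and `db` assumed to be an already-open handle:

```c
#include <rocksdb/c.h>
#include <stdio.h>
#include <stdlib.h>

// Create a checkpoint object, materialize it into `dir`, then destroy it.
static int doCheckpoint(rocksdb_t *db, const char *dir) {
  char *err = NULL;
  rocksdb_checkpoint_t *cp = rocksdb_checkpoint_object_create(db, &err);
  if (cp == NULL || err != NULL) {
    fprintf(stderr, "checkpoint object failed: %s\n", err ? err : "unknown");
    free(err);
    return -1;
  }
  // 64 << 20 mirrors the diff: flush the WAL if it exceeds 64 MB.
  rocksdb_checkpoint_create(cp, dir, 64 << 20, &err);
  rocksdb_checkpoint_object_destroy(cp);
  if (err != NULL) {
    fprintf(stderr, "checkpoint create failed: %s\n", err);
    free(err);
    return -1;
  }
  return 0;
}
```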
@@ -1390,7 +1401,7 @@ int32_t decodeValueFunc(void* value, int32_t vlen, int64_t* ttl, char** dest) {
 p = taosDecodeFixedI64(p, &key.unixTimestamp);
 p = taosDecodeFixedI32(p, &key.len);
 if (vlen != (sizeof(int64_t) + sizeof(int32_t) + key.len)) {
-  qError("vlen: %d, read len: %d", vlen, key.len);
+  stError("vlen: %d, read len: %d", vlen, key.len);
   goto _EXCEPT;
 }
 if (key.len != 0 && dest != NULL) p = taosDecodeBinary(p, (void**)dest, key.len);

@@ -1500,7 +1511,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t

 int idx = streamStateGetCfIdx(NULL, funcname);
 if (idx < 0 || idx >= sizeof(ginitDict) / sizeof(ginitDict[0])) {
-  qError("failed to open cf");
+  stError("failed to open cf");
   return -1;
 }
 SCfInit* cfPara = &ginitDict[idx];

@@ -1514,7 +1525,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
 rocksdb_t* db = rocksdb_open_column_families(handle->dbOpt, name, nCf, (const char* const*)cfs,
                                              (const rocksdb_options_t* const*)cfOpts, cfHandle, &err);
 if (err != NULL) {
-  qError("failed to open rocksdb cf, reason:%s", err);
+  stError("failed to open rocksdb cf, reason:%s", err);
   taosMemoryFree(err);
   taosMemoryFree(cfHandle);
   taosMemoryFree(pCompare);

@@ -1523,7 +1534,7 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t
   // fix other leak
   return -1;
 } else {
-  qDebug("succ to open rocksdb cf");
+  stDebug("succ to open rocksdb cf");
 }
 // close default cf
 if (((rocksdb_column_family_handle_t**)cfHandle)[0] != 0) {
@@ -1634,7 +1645,7 @@ int streamStateOpenBackend(void* backend, SStreamState* pState) {
 int64_t id = taosAddRef(streamBackendCfWrapperId, pBackendCfWrapper);
 pState->pTdbState->backendCfWrapperId = id;
 pState->pTdbState->pBackendCfWrapper = pBackendCfWrapper;
-qInfo("succ to open state %p on backendWrapper, %p, %s", pState, pBackendCfWrapper, pBackendCfWrapper->idstr);
+stInfo("succ to open state %p on backendWrapper, %p, %s", pState, pBackendCfWrapper, pBackendCfWrapper->idstr);
 return 0;
 }
 taosThreadMutexUnlock(&handle->cfMutex);

@@ -1686,7 +1697,7 @@ int streamStateOpenBackend(void* backend, SStreamState* pState) {
 int64_t id = taosAddRef(streamBackendCfWrapperId, pBackendCfWrapper);
 pState->pTdbState->backendCfWrapperId = id;
 pState->pTdbState->pBackendCfWrapper = pBackendCfWrapper;
-qInfo("succ to open state %p on backendWrapper %p %s", pState, pBackendCfWrapper, pBackendCfWrapper->idstr);
+stInfo("succ to open state %p on backendWrapper %p %s", pState, pBackendCfWrapper, pBackendCfWrapper->idstr);
 return 0;
 }

@@ -1694,7 +1705,7 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
 SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
 SBackendWrapper* pHandle = wrapper->pBackend;

-qInfo("start to close state on backend: %p", pHandle);
+stInfo("start to close state on backend: %p", pHandle);

 taosThreadMutexLock(&pHandle->cfMutex);
 RocksdbCfInst** ppInst = taosHashGet(pHandle->cfInst, wrapper->idstr, strlen(pState->pTdbState->idstr) + 1);

@@ -1706,7 +1717,7 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
 taosThreadMutexUnlock(&pHandle->cfMutex);

 char* status[] = {"close", "drop"};
-qInfo("start to %s state %p on backendWrapper %p %s", status[remove == false ? 0 : 1], pState, wrapper,
+stInfo("start to %s state %p on backendWrapper %p %s", status[remove == false ? 0 : 1], pState, wrapper,
       wrapper->idstr);
 wrapper->remove |= remove;  // update by other pState
 taosReleaseRef(streamBackendCfWrapperId, pState->pTdbState->backendCfWrapperId);
@@ -1740,10 +1751,10 @@ int streamStateGetCfIdx(SStreamState* pState, const char* funcName) {
 cf = rocksdb_create_column_family(wrapper->rocksdb, wrapper->cfOpts[idx], buf, &err);
 if (err != NULL) {
   idx = -1;
-  qError("failed to open cf, %p %s_%s, reason:%s", pState, wrapper->idstr, funcName, err);
+  stError("failed to open cf, %p %s_%s, reason:%s", pState, wrapper->idstr, funcName, err);
   taosMemoryFree(err);
 } else {
-  qDebug("succ to to open cf, %p %s_%s", pState, wrapper->idstr, funcName);
+  stDebug("succ to to open cf, %p %s_%s", pState, wrapper->idstr, funcName);
   wrapper->pHandle[idx] = cf;
 }
 }

@@ -1786,7 +1797,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe
 char* err = NULL; \
 int i = streamStateGetCfIdx(pState, funcname); \
 if (i < 0) { \
-  qWarn("streamState failed to get cf name: %s", funcname); \
+  stWarn("streamState failed to get cf name: %s", funcname); \
   code = -1; \
   break; \
 } \
@@ -1801,11 +1812,12 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe
 int32_t ttlVLen = ginitDict[i].enValueFunc((char*)value, vLen, 0, &ttlV); \
 rocksdb_put_cf(db, opts, pHandle, (const char*)buf, klen, (const char*)ttlV, (size_t)ttlVLen, &err); \
 if (err != NULL) { \
-  qError("streamState str: %s failed to write to %s, err: %s", toString, funcname, err); \
+  stError("streamState str: %s failed to write to %s, err: %s", toString, funcname, err); \
   taosMemoryFree(err); \
   code = -1; \
 } else { \
-  qTrace("streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d", toString, funcname, vLen, ttlVLen); \
+  stTrace("streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d", toString, funcname, vLen, \
+          ttlVLen); \
 } \
 taosMemoryFree(ttlV); \
 } while (0);

@@ -1817,7 +1829,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe
 char* err = NULL; \
 int i = streamStateGetCfIdx(pState, funcname); \
 if (i < 0) { \
-  qWarn("streamState failed to get cf name: %s", funcname); \
+  stWarn("streamState failed to get cf name: %s", funcname); \
   code = -1; \
   break; \
 } \

@@ -1832,9 +1844,9 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe
 char* val = rocksdb_get_cf(db, opts, pHandle, (const char*)buf, klen, (size_t*)&len, &err); \
 if (val == NULL || len == 0) { \
   if (err == NULL) { \
-    qTrace("streamState str: %s failed to read from %s_%s, err: not exist", toString, wrapper->idstr, funcname); \
+    stTrace("streamState str: %s failed to read from %s_%s, err: not exist", toString, wrapper->idstr, funcname); \
   } else { \
-    qError("streamState str: %s failed to read from %s_%s, err: %s", toString, wrapper->idstr, funcname, err); \
+    stError("streamState str: %s failed to read from %s_%s, err: %s", toString, wrapper->idstr, funcname, err); \
     taosMemoryFreeClear(err); \
   } \
   code = -1; \

@@ -1842,11 +1854,11 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe
 char* p = NULL; \
 int32_t tlen = ginitDict[i].deValueFunc(val, len, NULL, (char**)pVal); \
 if (tlen <= 0) { \
-  qError("streamState str: %s failed to read from %s_%s, err: already ttl ", toString, wrapper->idstr, \
+  stError("streamState str: %s failed to read from %s_%s, err: already ttl ", toString, wrapper->idstr, \
          funcname); \
   code = -1; \
 } else { \
-  qTrace("streamState str: %s succ to read from %s_%s, valLen:%d", toString, wrapper->idstr, funcname, tlen); \
+  stTrace("streamState str: %s succ to read from %s_%s, valLen:%d", toString, wrapper->idstr, funcname, tlen); \
 } \
 taosMemoryFree(val); \
 if (vLen != NULL) *vLen = tlen; \

@@ -1860,7 +1872,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe
 char* err = NULL; \
 int i = streamStateGetCfIdx(pState, funcname); \
 if (i < 0) { \
-  qWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \
+  stWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \
   code = -1; \
   break; \
 } \

@@ -1873,11 +1885,11 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe
 rocksdb_writeoptions_t* opts = wrapper->writeOpts; \
 rocksdb_delete_cf(db, opts, pHandle, (const char*)buf, klen, &err); \
 if (err != NULL) { \
-  qError("streamState str: %s failed to del from %s_%s, err: %s", toString, wrapper->idstr, funcname, err); \
+  stError("streamState str: %s failed to del from %s_%s, err: %s", toString, wrapper->idstr, funcname, err); \
   taosMemoryFree(err); \
   code = -1; \
 } else { \
-  qTrace("streamState str: %s succ to del from %s_%s", toString, wrapper->idstr, funcname); \
+  stTrace("streamState str: %s succ to del from %s_%s", toString, wrapper->idstr, funcname); \
 } \
 } while (0);
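
Note: the hunks above all edit the same family of `do { ... } while (0)` statement macros that wrap RocksDB reads, writes, and deletes, and the only substantive change in them is the logger rename. A minimal sketch of the pattern, with every name hypothetical rather than taken from the source, and a trivial key-value stand-in for `rocksdb_put_cf`:

```
#include <string.h>

typedef struct { char last[64]; } DemoDb;

/* stand-in for the real rocksdb_put_cf call */
static int demo_kv_put(DemoDb* db, const char* k, const char* v) {
  (void)k;
  strncpy(db->last, v, sizeof(db->last) - 1);
  return 0;
}

/* The do/while(0) wrapper makes the macro a single statement, and the
 * `break` gives it an early-exit path (used above when the column-family
 * lookup fails) without needing goto. */
#define DEMO_PUT(db, key, val, code)  \
  do {                                \
    if ((db) == NULL) {               \
      (code) = -1;                    \
      break; /* early exit */         \
    }                                 \
    (code) = demo_kv_put((db), (key), (val)); \
  } while (0)

int main(void) {
  DemoDb db = {{0}};
  int    code = 0;
  DEMO_PUT(&db, "k", "v", code);
  return code;
}
```
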
|
@ -1902,7 +1914,7 @@ int32_t streamStateDel_rocksdb(SStreamState* pState, const SWinKey* key) {
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
int32_t streamStateClear_rocksdb(SStreamState* pState) {
|
int32_t streamStateClear_rocksdb(SStreamState* pState) {
|
||||||
qDebug("streamStateClear_rocksdb");
|
stDebug("streamStateClear_rocksdb");
|
||||||
|
|
||||||
SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
|
SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
|
||||||
|
|
||||||
|
@ -1924,7 +1936,7 @@ int32_t streamStateClear_rocksdb(SStreamState* pState) {
|
||||||
stateKeyToString(&sKey, toStringStart);
|
stateKeyToString(&sKey, toStringStart);
|
||||||
stateKeyToString(&eKey, toStringEnd);
|
stateKeyToString(&eKey, toStringEnd);
|
||||||
|
|
||||||
qWarn("failed to delete range cf(state) start: %s, end:%s, reason:%s", toStringStart, toStringEnd, err);
|
stWarn("failed to delete range cf(state) start: %s, end:%s, reason:%s", toStringStart, toStringEnd, err);
|
||||||
taosMemoryFree(err);
|
taosMemoryFree(err);
|
||||||
} else {
|
} else {
|
||||||
rocksdb_compact_range_cf(wrapper->rocksdb, wrapper->pHandle[1], sKeyStr, sLen, eKeyStr, eLen);
|
rocksdb_compact_range_cf(wrapper->rocksdb, wrapper->pHandle[1], sKeyStr, sLen, eKeyStr, eLen);
|
||||||
|
@ -1941,7 +1953,7 @@ int32_t streamStateCurNext_rocksdb(SStreamState* pState, SStreamStateCur* pCur)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key) {
|
int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key) {
|
||||||
qDebug("streamStateGetFirst_rocksdb");
|
stDebug("streamStateGetFirst_rocksdb");
|
||||||
SWinKey tmp = {.ts = 0, .groupId = 0};
|
SWinKey tmp = {.ts = 0, .groupId = 0};
|
||||||
streamStatePut_rocksdb(pState, &tmp, NULL, 0);
|
streamStatePut_rocksdb(pState, &tmp, NULL, 0);
|
||||||
|
|
||||||
|
@ -1953,7 +1965,7 @@ int32_t streamStateGetFirst_rocksdb(SStreamState* pState, SWinKey* key) {
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t streamStateGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
|
int32_t streamStateGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
|
||||||
qDebug("streamStateGetGroupKVByCur_rocksdb");
|
stDebug("streamStateGetGroupKVByCur_rocksdb");
|
||||||
if (!pCur) {
|
if (!pCur) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -1970,7 +1982,7 @@ int32_t streamStateGetGroupKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey,
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
int32_t streamStateAddIfNotExist_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
|
int32_t streamStateAddIfNotExist_rocksdb(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
|
||||||
qDebug("streamStateAddIfNotExist_rocksdb");
|
stDebug("streamStateAddIfNotExist_rocksdb");
|
||||||
int32_t size = *pVLen;
|
int32_t size = *pVLen;
|
||||||
if (streamStateGet_rocksdb(pState, key, pVal, pVLen) == 0) {
|
if (streamStateGet_rocksdb(pState, key, pVal, pVLen) == 0) {
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1980,14 +1992,14 @@ int32_t streamStateAddIfNotExist_rocksdb(SStreamState* pState, const SWinKey* ke
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
int32_t streamStateCurPrev_rocksdb(SStreamState* pState, SStreamStateCur* pCur) {
|
int32_t streamStateCurPrev_rocksdb(SStreamState* pState, SStreamStateCur* pCur) {
|
||||||
qDebug("streamStateCurPrev_rocksdb");
|
stDebug("streamStateCurPrev_rocksdb");
|
||||||
if (!pCur) return -1;
|
if (!pCur) return -1;
|
||||||
|
|
||||||
rocksdb_iter_prev(pCur->iter);
|
rocksdb_iter_prev(pCur->iter);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
int32_t streamStateGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
|
int32_t streamStateGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
|
||||||
qDebug("streamStateGetKVByCur_rocksdb");
|
stDebug("streamStateGetKVByCur_rocksdb");
|
||||||
if (!pCur) return -1;
|
if (!pCur) return -1;
|
||||||
SStateKey tkey;
|
SStateKey tkey;
|
||||||
SStateKey* pKtmp = &tkey;
|
SStateKey* pKtmp = &tkey;
|
||||||
|
@ -2012,7 +2024,7 @@ int32_t streamStateGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, cons
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
SStreamStateCur* streamStateGetAndCheckCur_rocksdb(SStreamState* pState, SWinKey* key) {
|
SStreamStateCur* streamStateGetAndCheckCur_rocksdb(SStreamState* pState, SWinKey* key) {
|
||||||
qDebug("streamStateGetAndCheckCur_rocksdb");
|
stDebug("streamStateGetAndCheckCur_rocksdb");
|
||||||
SStreamStateCur* pCur = streamStateFillGetCur_rocksdb(pState, key);
|
SStreamStateCur* pCur = streamStateFillGetCur_rocksdb(pState, key);
|
||||||
if (pCur) {
|
if (pCur) {
|
||||||
int32_t code = streamStateGetGroupKVByCur_rocksdb(pCur, key, NULL, 0);
|
int32_t code = streamStateGetGroupKVByCur_rocksdb(pCur, key, NULL, 0);
|
||||||
|
@ -2023,7 +2035,7 @@ SStreamStateCur* streamStateGetAndCheckCur_rocksdb(SStreamState* pState, SWinKey
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamStateCur* streamStateSeekKeyNext_rocksdb(SStreamState* pState, const SWinKey* key) {
|
SStreamStateCur* streamStateSeekKeyNext_rocksdb(SStreamState* pState, const SWinKey* key) {
|
||||||
qDebug("streamStateSeekKeyNext_rocksdb");
|
stDebug("streamStateSeekKeyNext_rocksdb");
|
||||||
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
|
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
|
||||||
if (pCur == NULL) {
|
if (pCur == NULL) {
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -2062,7 +2074,7 @@ SStreamStateCur* streamStateSeekKeyNext_rocksdb(SStreamState* pState, const SWin
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState, const SWinKey* key) {
|
SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState, const SWinKey* key) {
|
||||||
qDebug("streamStateGetCur_rocksdb");
|
stDebug("streamStateGetCur_rocksdb");
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
|
|
||||||
const SStateKey maxStateKey = {.key = {.groupId = UINT64_MAX, .ts = INT64_MAX}, .opNum = INT64_MAX};
|
const SStateKey maxStateKey = {.key = {.groupId = UINT64_MAX, .ts = INT64_MAX}, .opNum = INT64_MAX};
|
||||||
|
@ -2077,7 +2089,7 @@ SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState, const SWinK
|
||||||
{
|
{
|
||||||
char tbuf[256] = {0};
|
char tbuf[256] = {0};
|
||||||
stateKeyToString((void*)&maxStateKey, tbuf);
|
stateKeyToString((void*)&maxStateKey, tbuf);
|
||||||
qDebug("seek to last:%s", tbuf);
|
stDebug("seek to last:%s", tbuf);
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
|
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
|
||||||
|
@ -2104,7 +2116,7 @@ SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState, const SWinK
|
||||||
}
|
}
|
||||||
|
|
||||||
SStreamStateCur* streamStateGetCur_rocksdb(SStreamState* pState, const SWinKey* key) {
|
SStreamStateCur* streamStateGetCur_rocksdb(SStreamState* pState, const SWinKey* key) {
|
||||||
qDebug("streamStateGetCur_rocksdb");
|
stDebug("streamStateGetCur_rocksdb");
|
||||||
SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
|
SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
|
||||||
|
|
||||||
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
|
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
|
||||||
|
@@ -2161,7 +2173,7 @@ int32_t streamStateSessionPut_rocksdb(SStreamState* pState, const SSessionKey* k
 return code;
 }
 int32_t streamStateSessionGet_rocksdb(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen) {
-  qDebug("streamStateSessionGet_rocksdb");
+  stDebug("streamStateSessionGet_rocksdb");
 int code = 0;
 SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentNext_rocksdb(pState, key);
 SSessionKey resKey = *key;

@@ -2193,7 +2205,7 @@ int32_t streamStateSessionDel_rocksdb(SStreamState* pState, const SSessionKey* k
 return code;
 }
 SStreamStateCur* streamStateSessionSeekKeyCurrentPrev_rocksdb(SStreamState* pState, const SSessionKey* key) {
-  qDebug("streamStateSessionSeekKeyCurrentPrev_rocksdb");
+  stDebug("streamStateSessionSeekKeyCurrentPrev_rocksdb");

 SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
 SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));

@@ -2234,7 +2246,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentPrev_rocksdb(SStreamState* pSta
 return pCur;
 }
 SStreamStateCur* streamStateSessionSeekKeyCurrentNext_rocksdb(SStreamState* pState, SSessionKey* key) {
-  qDebug("streamStateSessionSeekKeyCurrentNext_rocksdb");
+  stDebug("streamStateSessionSeekKeyCurrentNext_rocksdb");
 SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
 SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
 if (pCur == NULL) {

@@ -2272,7 +2284,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentNext_rocksdb(SStreamState* pSta
 }

 SStreamStateCur* streamStateSessionSeekKeyNext_rocksdb(SStreamState* pState, const SSessionKey* key) {
-  qDebug("streamStateSessionSeekKeyNext_rocksdb");
+  stDebug("streamStateSessionSeekKeyNext_rocksdb");
 SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
 SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
 if (pCur == NULL) {

@@ -2311,7 +2323,7 @@ SStreamStateCur* streamStateSessionSeekKeyNext_rocksdb(SStreamState* pState, con
 return pCur;
 }
 int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen) {
-  qDebug("streamStateSessionGetKVByCur_rocksdb");
+  stDebug("streamStateSessionGetKVByCur_rocksdb");
 if (!pCur) {
   return -1;
 }

@@ -2375,7 +2387,7 @@ int32_t streamStateFillDel_rocksdb(SStreamState* pState, const SWinKey* key) {
 }

 SStreamStateCur* streamStateFillGetCur_rocksdb(SStreamState* pState, const SWinKey* key) {
-  qDebug("streamStateFillGetCur_rocksdb");
+  stDebug("streamStateFillGetCur_rocksdb");
 SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
 SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;

@@ -2411,7 +2423,7 @@ SStreamStateCur* streamStateFillGetCur_rocksdb(SStreamState* pState, const SWinK
 return NULL;
 }
 int32_t streamStateFillGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
-  qDebug("streamStateFillGetKVByCur_rocksdb");
+  stDebug("streamStateFillGetKVByCur_rocksdb");
 if (!pCur) {
   return -1;
 }

@@ -2435,7 +2447,7 @@ int32_t streamStateFillGetKVByCur_rocksdb(SStreamStateCur* pCur, SWinKey* pKey,
 }

 SStreamStateCur* streamStateFillSeekKeyNext_rocksdb(SStreamState* pState, const SWinKey* key) {
-  qDebug("streamStateFillSeekKeyNext_rocksdb");
+  stDebug("streamStateFillSeekKeyNext_rocksdb");
 SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
 SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
 if (!pCur) {

@@ -2473,7 +2485,7 @@ SStreamStateCur* streamStateFillSeekKeyNext_rocksdb(SStreamState* pState, const
 return NULL;
 }
 SStreamStateCur* streamStateFillSeekKeyPrev_rocksdb(SStreamState* pState, const SWinKey* key) {
-  qDebug("streamStateFillSeekKeyPrev_rocksdb");
+  stDebug("streamStateFillSeekKeyPrev_rocksdb");
 SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
 SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
 if (pCur == NULL) {

@@ -2511,7 +2523,7 @@ SStreamStateCur* streamStateFillSeekKeyPrev_rocksdb(SStreamState* pState, const
 return NULL;
 }
 int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSessionKey* key, SSessionKey* curKey) {
-  qDebug("streamStateSessionGetKeyByRange_rocksdb");
+  stDebug("streamStateSessionGetKeyByRange_rocksdb");
 SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
 SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
 if (pCur == NULL) {

@@ -2570,7 +2582,7 @@ int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSes

 int32_t streamStateSessionAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
                                                 int32_t* pVLen) {
-  qDebug("streamStateSessionAddIfNotExist_rocksdb");
+  stDebug("streamStateSessionAddIfNotExist_rocksdb");
 // todo refactor
 int32_t res = 0;
 SSessionKey originKey = *key;

@@ -2620,7 +2632,7 @@ _end:
 return res;
 }
 int32_t streamStateSessionClear_rocksdb(SStreamState* pState) {
-  qDebug("streamStateSessionClear_rocksdb");
+  stDebug("streamStateSessionClear_rocksdb");
 SSessionKey key = {.win.skey = 0, .win.ekey = 0, .groupId = 0};
 SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentNext_rocksdb(pState, &key);

@@ -2646,7 +2658,7 @@ int32_t streamStateSessionClear_rocksdb(SStreamState* pState) {
 }
 int32_t streamStateStateAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey* key, char* pKeyData,
                                               int32_t keyDataLen, state_key_cmpr_fn fn, void** pVal, int32_t* pVLen) {
-  qDebug("streamStateStateAddIfNotExist_rocksdb");
+  stDebug("streamStateStateAddIfNotExist_rocksdb");
 // todo refactor
 int32_t res = 0;
 SSessionKey tmpKey = *key;
@@ -2840,7 +2852,7 @@ int32_t streamStatePutBatch(SStreamState* pState, const char* cfKeyName, rocksdb

 int i = streamStateGetCfIdx(pState, cfKeyName);
 if (i < 0) {
-  qError("streamState failed to put to cf name:%s", cfKeyName);
+  stError("streamState failed to put to cf name:%s", cfKeyName);
   return -1;
 }

@@ -2857,7 +2869,7 @@ int32_t streamStatePutBatch(SStreamState* pState, const char* cfKeyName, rocksdb
 {
   char tbuf[256] = {0};
   ginitDict[i].toStrFunc((void*)key, tbuf);
-  qDebug("streamState str: %s succ to write to %s_%s, len: %d", tbuf, wrapper->idstr, ginitDict[i].key, vlen);
+  stDebug("streamState str: %s succ to write to %s_%s, len: %d", tbuf, wrapper->idstr, ginitDict[i].key, vlen);
 }
 return 0;
 }

@@ -2880,7 +2892,7 @@ int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb
 {
   char tbuf[256] = {0};
   ginitDict[cfIdx].toStrFunc((void*)key, tbuf);
-  qDebug("streamState str: %s succ to write to %s_%s", tbuf, wrapper->idstr, ginitDict[cfIdx].key);
+  stDebug("streamState str: %s succ to write to %s_%s", tbuf, wrapper->idstr, ginitDict[cfIdx].key);
 }
 return 0;
 }

@@ -2889,11 +2901,11 @@ int32_t streamStatePutBatch_rocksdb(SStreamState* pState, void* pBatch) {
 SBackendCfWrapper* wrapper = pState->pTdbState->pBackendCfWrapper;
 rocksdb_write(wrapper->rocksdb, wrapper->writeOpts, (rocksdb_writebatch_t*)pBatch, &err);
 if (err != NULL) {
-  qError("streamState failed to write batch, err:%s", err);
+  stError("streamState failed to write batch, err:%s", err);
   taosMemoryFree(err);
   return -1;
 } else {
-  qDebug("write batch to backend:%p", wrapper->pBackend);
+  stDebug("write batch to backend:%p", wrapper->pBackend);
 }
 return 0;
 }
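
Note: apart from the two behavioral fixes in this file (the `else if (nCf != 0)` guard and the reflowed format strings), everything above is a mechanical rename from the query-module loggers (`qDebug`/`qError`/...) to stream-module ones (`stDebug`/`stError`/...). A sketch of how such module-scoped macros are typically layered over one printf-style sink; the names, tags, and flag here are illustrative assumptions, not TDengine's actual definitions:

```
#include <stdarg.h>
#include <stdio.h>

/* Hypothetical module flag; the real macros gate on per-module debug
 * flags in a comparable way. */
static int stDebugFlagDemo = 1;

/* printf-style sink standing in for a shared logger such as taosPrintLog(). */
static void demoPrintLog(const char* tag, const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  fprintf(stderr, "%s ", tag);
  vfprintf(stderr, fmt, ap);
  fputc('\n', stderr);
  va_end(ap);
}

/* Module-scoped wrappers: renaming qDebug -> stDebug at a call site swaps
 * the tag and the gating flag without touching any arguments, which is why
 * the diff above is almost purely mechanical. */
#define stDebugDemo(...) \
  do { if (stDebugFlagDemo) demoPrintLog("STM DEBUG", __VA_ARGS__); } while (0)
#define stErrorDemo(...) demoPrintLog("STM ERROR", __VA_ARGS__)

int main(void) {
  stDebugDemo("stream backend:%p end to do checkpoint", (void*)0);
  stErrorDemo("failed to open cf");
  return 0;
}
```
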
@@ -95,7 +95,7 @@ static int32_t streamAlignCheckpoint(SStreamTask* pTask) {
 int32_t num = taosArrayGetSize(pTask->pUpstreamInfoList);
 int64_t old = atomic_val_compare_exchange_32(&pTask->checkpointAlignCnt, 0, num);
 if (old == 0) {
-  qDebug("s-task:%s set initial align upstream num:%d", pTask->id.idStr, num);
+  stDebug("s-task:%s set initial align upstream num:%d", pTask->id.idStr, num);
 }

 return atomic_sub_fetch_32(&pTask->checkpointAlignCnt, 1);
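
Note: `streamAlignCheckpoint` lazily installs the upstream count into `checkpointAlignCnt` with a compare-and-swap on the first checkpoint trigger, then every trigger decrements it; the caller that drives the count to zero knows all upstreams have reported. A compact sketch of the same counter protocol in portable C11 atomics (the type and function names are hypothetical):

```
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the task's checkpointAlignCnt field. */
typedef struct {
  atomic_int alignCnt;
} DemoTask;

/* First trigger CAS-installs the upstream count; every trigger then
 * decrements. Returns how many upstream triggers are still outstanding,
 * so the caller that observes 0 knows alignment is complete. */
static int demoAlignCheckpoint(DemoTask* t, int numOfUpstream) {
  int expected = 0;
  atomic_compare_exchange_strong(&t->alignCnt, &expected, numOfUpstream);
  return atomic_fetch_sub(&t->alignCnt, 1) - 1; /* like atomic_sub_fetch_32 */
}

int main(void) {
  DemoTask t = {0};
  for (int i = 0; i < 3; i++) {
    printf("not ready after trigger %d: %d\n", i + 1, demoAlignCheckpoint(&t, 3));
  }
  return 0; /* prints 2, 1, 0 */
}
```
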
@@ -136,18 +136,16 @@ int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSo
 ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE);

 // 1. set task status to be prepared for check point, no data are allowed to put into inputQ.
-taosThreadMutexLock(&pTask->lock);

 pTask->status.taskStatus = TASK_STATUS__CK;
 pTask->checkpointingId = pReq->checkpointId;
 pTask->checkpointNotReadyTasks = streamTaskGetNumOfDownstream(pTask);
 pTask->chkInfo.startTs = taosGetTimestampMs();

+pTask->execInfo.checkpoint += 1;

 // 2. let's dispatch checkpoint msg to downstream task directly and do nothing else. put the checkpoint block into
 // inputQ, to make sure all blocks with less version have been handled by this task already.
 int32_t code = appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT_TRIGGER);
-taosThreadMutexUnlock(&pTask->lock);

 return code;
 }
@@ -159,6 +157,7 @@ static int32_t continueDispatchCheckpointBlock(SStreamDataBlock* pBlock, SStream
 if (code == 0) {
   streamDispatchStreamBlock(pTask);
 } else {
+  stError("s-task:%s failed to put checkpoint into outputQ, code:%s", pTask->id.idStr, tstrerror(code));
   streamFreeQitem((SStreamQueueItem*)pBlock);
 }
@@ -193,7 +192,7 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc
 int32_t taskLevel = pTask->info.taskLevel;
 if (taskLevel == TASK_LEVEL__SOURCE) {
   if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
-    qDebug("s-task:%s set childIdx:%d, and add checkpoint block into outputQ", id, pTask->info.selfChildId);
+    stDebug("s-task:%s set childIdx:%d, and add checkpoint block into outputQ", id, pTask->info.selfChildId);
     continueDispatchCheckpointBlock(pBlock, pTask);
   } else {  // only one task exists, no need to dispatch downstream info
     streamProcessCheckpointReadyMsg(pTask);

@@ -201,8 +200,10 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc
   }
 } else if (taskLevel == TASK_LEVEL__SINK || taskLevel == TASK_LEVEL__AGG) {
   ASSERT(taosArrayGetSize(pTask->pUpstreamInfoList) > 0);
+  if (pTask->chkInfo.startTs == 0) {
     pTask->chkInfo.startTs = taosGetTimestampMs();
+    pTask->execInfo.checkpoint += 1;
+  }

   // update the child Id for downstream tasks
   streamAddCheckpointReadyMsg(pTask, pBlock->srcTaskId, pTask->info.selfChildId, checkpointId);
@@ -211,19 +212,19 @@ int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBloc
 int32_t notReady = streamAlignCheckpoint(pTask);
 int32_t num = taosArrayGetSize(pTask->pUpstreamInfoList);
 if (notReady > 0) {
-  qDebug("s-task:%s received checkpoint block, idx:%d, %d upstream tasks not send checkpoint info yet, total:%d",
+  stDebug("s-task:%s received checkpoint block, idx:%d, %d upstream tasks not send checkpoint info yet, total:%d",
          id, pTask->info.selfChildId, notReady, num);
   streamFreeQitem((SStreamQueueItem*)pBlock);
   return code;
 }

 if (taskLevel == TASK_LEVEL__SINK) {
-  qDebug("s-task:%s process checkpoint block, all %d upstreams sent checkpoint msgs, send ready msg to upstream",
+  stDebug("s-task:%s process checkpoint block, all %d upstreams sent checkpoint msgs, send ready msg to upstream",
          id, num);
   streamFreeQitem((SStreamQueueItem*)pBlock);
   streamTaskBuildCheckpoint(pTask);
 } else {
-  qDebug(
+  stDebug(
       "s-task:%s process checkpoint block, all %d upstreams sent checkpoint msgs, dispatch checkpoint msg "
       "downstream", id, num);
@@ -252,12 +253,12 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask) {
 ASSERT(notReady >= 0);

 if (notReady == 0) {
-  qDebug("s-task:%s all downstream tasks have completed the checkpoint, start to do checkpoint for current task",
+  stDebug("s-task:%s all downstream tasks have completed the checkpoint, start to do checkpoint for current task",
          pTask->id.idStr);
   appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT);
 } else {
   int32_t total = streamTaskGetNumOfDownstream(pTask);
-  qDebug("s-task:%s %d/%d downstream tasks are not ready, wait", pTask->id.idStr, notReady, total);
+  stDebug("s-task:%s %d/%d downstream tasks are not ready, wait", pTask->id.idStr, notReady, total);
 }

 return 0;
@@ -266,7 +267,6 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask) {
 int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId) {
 taosWLockLatch(&pMeta->lock);

-int64_t keys[2];
 for (int32_t i = 0; i < taosArrayGetSize(pMeta->pTaskList); ++i) {
   STaskId* pId = taosArrayGet(pMeta->pTaskList, i);
   SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, pId, sizeof(*pId));
@@ -288,20 +288,22 @@ int32_t streamSaveAllTaskStatus(SStreamMeta* pMeta, int64_t checkpointId) {
 // save the task
 streamMetaSaveTask(pMeta, p);
 streamTaskOpenAllUpstreamInput(p);  // open inputQ for all upstream tasks
-qDebug("vgId:%d s-task:%s level:%d commit task status after checkpoint completed, checkpointId:%" PRId64
-       ", Ver(saved):%" PRId64 " currentVer:%" PRId64 ", status to be normal, prev:%s",
+stDebug(
+    "vgId:%d s-task:%s level:%d open upstream inputQ, commit task status after checkpoint completed, "
+    "checkpointId:%" PRId64 ", Ver(saved):%" PRId64 " currentVer:%" PRId64 ", status to be normal, prev:%s",
     pMeta->vgId, p->id.idStr, p->info.taskLevel, checkpointId, p->chkInfo.checkpointVer, p->chkInfo.nextProcessVer,
     streamGetTaskStatusStr(prev));
 }

 if (streamMetaCommit(pMeta) < 0) {
   taosWUnLockLatch(&pMeta->lock);
-  qError("vgId:%d failed to commit stream meta after do checkpoint, checkpointId:%" PRId64 ", since %s", pMeta->vgId,
+  stError("vgId:%d failed to commit stream meta after do checkpoint, checkpointId:%" PRId64 ", since %s", pMeta->vgId,
          checkpointId, terrstr());
   return -1;
 } else {
   taosWUnLockLatch(&pMeta->lock);
-  qInfo("vgId:%d commit stream meta after do checkpoint, checkpointId:%" PRId64 " DONE", pMeta->vgId, checkpointId);
+  stInfo("vgId:%d commit stream meta after do checkpoint, checkpointId:%" PRId64 " DONE", pMeta->vgId, checkpointId);
 }

 return TSDB_CODE_SUCCESS;
@@ -316,18 +318,21 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
 ASSERT(remain >= 0);

 double el = (taosGetTimestampMs() - pTask->chkInfo.startTs) / 1000.0;
+pTask->chkInfo.startTs = 0;  // clear the recorded start time

 if (remain == 0) {  // all tasks are ready
-  qDebug("s-task:%s is ready for checkpoint", pTask->id.idStr);
+  stDebug("s-task:%s all downstreams are ready, ready for do checkpoint", pTask->id.idStr);
   streamBackendDoCheckpoint(pMeta, pTask->checkpointingId);
   streamSaveAllTaskStatus(pMeta, pTask->checkpointingId);
-  qDebug("vgId:%d vnode wide checkpoint completed, save all tasks status, elapsed time:%.2f Sec checkpointId:%" PRId64, pMeta->vgId,
-         el, pTask->checkpointingId);
+  stInfo(
+      "vgId:%d vnode wide checkpoint completed, save all tasks status, last:%s, level:%d elapsed time:%.2f Sec "
+      "checkpointId:%" PRId64,
+      pMeta->vgId, pTask->id.idStr, pTask->info.taskLevel, el, pTask->checkpointingId);
 } else {
-  qDebug(
-      "vgId:%d vnode wide tasks not reach checkpoint ready status, ready s-task:%s, elapsed time:%.2f Sec not "
-      "ready:%d/%d",
-      pMeta->vgId, pTask->id.idStr, el, remain, pMeta->numOfStreamTasks);
+  stInfo(
+      "vgId:%d vnode wide tasks not reach checkpoint ready status, ready s-task:%s, level:%d elapsed time:%.2f Sec "
+      "not ready:%d/%d",
+      pMeta->vgId, pTask->id.idStr, pTask->info.taskLevel, el, remain, pMeta->numOfStreamTasks);
 }

 // send check point response to upstream task
@@ -339,7 +344,7 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {

 if (code != TSDB_CODE_SUCCESS) {
   // todo: let's retry send rsp to upstream/mnode
-  qError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%" PRId64 ", code:%s", pTask->id.idStr,
+  stError("s-task:%s failed to send checkpoint rsp to upstream, checkpointId:%" PRId64 ", code:%s", pTask->id.idStr,
          pTask->checkpointingId, tstrerror(code));
 }
@@ -165,12 +165,16 @@ SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem*
 SStreamDataBlock* pBlockSrc = (SStreamDataBlock*)pElem;
 taosArrayAddAll(pBlock->blocks, pBlockSrc->blocks);
 taosArrayDestroy(pBlockSrc->blocks);
+streamQueueItemIncSize(dst, streamQueueItemGetSize(pElem));

 taosFreeQitem(pElem);
 return dst;
 } else if (dst->type == STREAM_INPUT__MERGED_SUBMIT && pElem->type == STREAM_INPUT__DATA_SUBMIT) {
 SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)dst;
 SStreamDataSubmit* pBlockSrc = (SStreamDataSubmit*)pElem;
 streamMergeSubmit(pMerged, pBlockSrc);
+streamQueueItemIncSize(dst, streamQueueItemGetSize(pElem));

 taosFreeQitem(pElem);
 return dst;
 } else if (dst->type == STREAM_INPUT__DATA_SUBMIT && pElem->type == STREAM_INPUT__DATA_SUBMIT) {
@@ -180,13 +184,16 @@ SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem*
 return NULL;
 }

+streamQueueItemIncSize((SStreamQueueItem*)pMerged, streamQueueItemGetSize(pElem));

 streamMergeSubmit(pMerged, (SStreamDataSubmit*)dst);
 streamMergeSubmit(pMerged, (SStreamDataSubmit*)pElem);

 taosFreeQitem(dst);
 taosFreeQitem(pElem);
 return (SStreamQueueItem*)pMerged;
 } else {
-  qDebug("block type:%s not merged with existed blocks list, type:%d", streamGetBlockTypeStr(pElem->type), dst->type);
+  stDebug("block type:%s not merged with existed blocks list, type:%d", streamQueueItemGetTypeStr(pElem->type), dst->type);
   return NULL;
 }
 }
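
Note: both hunks in this file add `streamQueueItemIncSize` at each merge point, so the bytes carried by the absorbed element are credited to the surviving item before the element is freed; without that, the queue's byte accounting drifts downward after every merge. A minimal sketch of the invariant (all names hypothetical):

```
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical size-tracked queue item standing in for SStreamQueueItem. */
typedef struct {
  int64_t size; /* bytes this item is charged for in the queue */
} DemoItem;

static int64_t demoItemGetSize(const DemoItem* p) { return p->size; }
static void    demoItemIncSize(DemoItem* p, int64_t n) { p->size += n; }

/* Merge src into dst. The payload move is elided; the point is that the
 * byte accounting must follow the payload, which is what the added
 * streamQueueItemIncSize calls do in the hunks above. */
static DemoItem* demoMerge(DemoItem* dst, DemoItem* src) {
  demoItemIncSize(dst, demoItemGetSize(src));
  free(src);
  return dst;
}

int main(void) {
  DemoItem* a = calloc(1, sizeof(DemoItem));
  DemoItem* b = calloc(1, sizeof(DemoItem));
  a->size = 100;
  b->size = 40;
  a = demoMerge(a, b);
  printf("merged size: %lld\n", (long long)a->size); /* 140 */
  free(a);
  return 0;
}
```
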
@@ -227,16 +234,3 @@ void streamFreeQitem(SStreamQueueItem* data) {
 taosFreeQitem(pBlock);
 }
 }
-
-const char* streamGetBlockTypeStr(int32_t type) {
-  switch (type) {
-    case STREAM_INPUT__CHECKPOINT:
-      return "checkpoint";
-    case STREAM_INPUT__CHECKPOINT_TRIGGER:
-      return "checkpoint-trigger";
-    case STREAM_INPUT__TRANS_STATE:
-      return "trans-state";
-    default:
-      return "";
-  }
-}
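
Note: the deleted `streamGetBlockTypeStr` is superseded by `streamQueueItemGetTypeStr`, which the earlier hunk already switched the call site to. For reference, the removed helper followed the usual switch-to-string-literal shape; a reconstruction of that shape with hypothetical enum values (the real `STREAM_INPUT__*` constants live elsewhere in the tree):

```
#include <stdio.h>

/* Demo constants; the actual values are defined in the stream headers. */
enum {
  DEMO_INPUT_CHECKPOINT = 1,
  DEMO_INPUT_CHECKPOINT_TRIGGER,
  DEMO_INPUT_TRANS_STATE,
};

static const char* demoGetTypeStr(int type) {
  switch (type) {
    case DEMO_INPUT_CHECKPOINT:
      return "checkpoint";
    case DEMO_INPUT_CHECKPOINT_TRIGGER:
      return "checkpoint-trigger";
    case DEMO_INPUT_TRANS_STATE:
      return "trans-state";
    default:
      return "";
  }
}

int main(void) {
  printf("%s\n", demoGetTypeStr(DEMO_INPUT_TRANS_STATE)); /* trans-state */
  return 0;
}
```
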
@@ -18,10 +18,6 @@
 #include "ttimer.h"
 #include "tmisce.h"

-#define MAX_BLOCK_NAME_NUM 1024
-#define DISPATCH_RETRY_INTERVAL_MS 300
-#define MAX_CONTINUE_RETRY_COUNT 5
-
 typedef struct SBlockName {
   uint32_t hashValue;
   char parTbName[TSDB_TABLE_NAME_LEN];
@@ -53,6 +49,7 @@ void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen) {
 int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) {
 if (tStartEncode(pEncoder) < 0) return -1;
 if (tEncodeI64(pEncoder, pReq->stage) < 0) return -1;
+if (tEncodeI32(pEncoder, pReq->msgId) < 0) return -1;
 if (tEncodeI32(pEncoder, pReq->srcVgId) < 0) return -1;
 if (tEncodeI32(pEncoder, pReq->type) < 0) return -1;
 if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
@@ -78,6 +75,7 @@ int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* p
 int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) {
 if (tStartDecode(pDecoder) < 0) return -1;
 if (tDecodeI64(pDecoder, &pReq->stage) < 0) return -1;
+if (tDecodeI32(pDecoder, &pReq->msgId) < 0) return -1;
 if (tDecodeI32(pDecoder, &pReq->srcVgId) < 0) return -1;
 if (tDecodeI32(pDecoder, &pReq->type) < 0) return -1;
 if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
@@ -112,6 +110,7 @@ static int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTas
 pReq->streamId = pTask->id.streamId;
 pReq->srcVgId = vgId;
 pReq->stage = pTask->pMeta->stage;
+pReq->msgId = pTask->execInfo.dispatch;
 pReq->upstreamTaskId = pTask->id.taskId;
 pReq->upstreamChildId = pTask->info.selfChildId;
 pReq->upstreamNodeId = pTask->info.nodeId;
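
Note: these three hunks wire the new `msgId` field through the dispatch request: it is assigned from `pTask->execInfo.dispatch` at build time, and it must be encoded and decoded at the same position, immediately after `stage`, or every later field shifts. A toy fixed-width codec illustrating that symmetry requirement (all names hypothetical, standing in for `tEncodeI32`/`tDecodeI32`):

```
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t* p; } DemoCoder;

static void demoPutI64(DemoCoder* c, int64_t v)  { memcpy(c->p, &v, 8); c->p += 8; }
static void demoPutI32(DemoCoder* c, int32_t v)  { memcpy(c->p, &v, 4); c->p += 4; }
static void demoGetI64(DemoCoder* c, int64_t* v) { memcpy(v, c->p, 8); c->p += 8; }
static void demoGetI32(DemoCoder* c, int32_t* v) { memcpy(v, c->p, 4); c->p += 4; }

typedef struct {
  int64_t stage;
  int32_t msgId; /* the new field */
  int32_t srcVgId;
} DemoReq;

/* Encoder and decoder must insert msgId at the same offset (right after
 * stage), mirroring what the paired hunks above do on both sides. */
static void demoEncode(DemoCoder* c, const DemoReq* r) {
  demoPutI64(c, r->stage);
  demoPutI32(c, r->msgId);
  demoPutI32(c, r->srcVgId);
}
static void demoDecode(DemoCoder* c, DemoReq* r) {
  demoGetI64(c, &r->stage);
  demoGetI32(c, &r->msgId);
  demoGetI32(c, &r->srcVgId);
}

int main(void) {
  uint8_t  buf[16];
  DemoReq  in = {.stage = 7, .msgId = 42, .srcVgId = 3}, out = {0};
  DemoCoder enc = {buf}, dec = {buf};
  demoEncode(&enc, &in);
  demoDecode(&dec, &out);
  printf("msgId roundtrip: %d\n", out.msgId); /* 42 */
  return 0;
}
```
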
@@ -231,7 +230,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
 }

 buf = NULL;
-qDebug("s-task:%s (child %d) send retrieve req to task:0x%x (vgId:%d), reqId:0x%" PRIx64, pTask->id.idStr,
+stDebug("s-task:%s (child %d) send retrieve req to task:0x%x (vgId:%d), reqId:0x%" PRIx64, pTask->id.idStr,
        pTask->info.selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req.reqId);
 }
 code = 0;
@@ -242,7 +241,7 @@ CLEAR:
 return code;
 }

-int32_t streamDispatchCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet) {
+int32_t streamSendCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet) {
 void* buf = NULL;
 int32_t code = -1;
 SRpcMsg msg = {0};
@@ -270,66 +269,73 @@ int32_t streamSendCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq,
 tEncoderClear(&encoder);

 initRpcMsg(&msg, TDMT_VND_STREAM_TASK_CHECK, buf, tlen + sizeof(SMsgHead));
-qDebug("s-task:%s (level:%d) send check msg to s-task:0x%" PRIx64 ":0x%x (vgId:%d)", pTask->id.idStr,
+stDebug("s-task:%s (level:%d) send check msg to s-task:0x%" PRIx64 ":0x%x (vgId:%d)", pTask->id.idStr,
        pTask->info.taskLevel, pReq->streamId, pReq->downstreamTaskId, nodeId);

 tmsgSendReq(pEpSet, &msg);
 return 0;
 }

-static int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) {
+void destroyDispatchMsg(SStreamDispatchReq* pReq, int32_t numOfVgroups) {
+  for (int32_t i = 0; i < numOfVgroups; i++) {
+    taosArrayDestroyP(pReq[i].data, taosMemoryFree);
+    taosArrayDestroy(pReq[i].dataLen);
+  }
+
+  taosMemoryFree(pReq);
+}
+
+int32_t getNumOfDispatchBranch(SStreamTask* pTask) {
+  return (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH)
+             ? 1
+             : taosArrayGetSize(pTask->shuffleDispatcher.dbInfo.pVgroupInfos);
+}
+
+static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pData) {
 int32_t code = 0;
 int32_t numOfBlocks = taosArrayGetSize(pData->blocks);
-ASSERT(numOfBlocks != 0);
+ASSERT(numOfBlocks != 0 && pTask->msgInfo.pData == NULL);

+pTask->msgInfo.dispatchMsgType = pData->type;

 if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
-  SStreamDispatchReq req = {0};
+  SStreamDispatchReq* pReq = taosMemoryCalloc(1, sizeof(SStreamDispatchReq));

-  int32_t downstreamTaskId = pTask->fixedEpDispatcher.taskId;
-  code = tInitStreamDispatchReq(&req, pTask, pData->srcVgId, numOfBlocks, downstreamTaskId, pData->type);
+  int32_t downstreamTaskId = pTask->fixedDispatcher.taskId;
+  code = tInitStreamDispatchReq(pReq, pTask, pData->srcVgId, numOfBlocks, downstreamTaskId, pData->type);
   if (code != TSDB_CODE_SUCCESS) {
     return code;
   }

   for (int32_t i = 0; i < numOfBlocks; i++) {
     SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i);
-    code = streamAddBlockIntoDispatchMsg(pDataBlock, &req);
+    code = streamAddBlockIntoDispatchMsg(pDataBlock, pReq);
     if (code != TSDB_CODE_SUCCESS) {
-      taosArrayDestroyP(req.data, taosMemoryFree);
-      taosArrayDestroy(req.dataLen);
+      destroyDispatchMsg(pReq, 1);
       return code;
     }
   }

-  int32_t vgId = pTask->fixedEpDispatcher.nodeId;
-  SEpSet* pEpSet = &pTask->fixedEpDispatcher.epSet;
-
-  qDebug("s-task:%s (child taskId:%d) fix-dispatch %d block(s) to s-task:0x%x (vgId:%d)", pTask->id.idStr,
-         pTask->info.selfChildId, numOfBlocks, downstreamTaskId, vgId);
-
-  code = doSendDispatchMsg(pTask, &req, vgId, pEpSet);
-  taosArrayDestroyP(req.data, taosMemoryFree);
-  taosArrayDestroy(req.dataLen);
-  return code;
+  pTask->msgInfo.pData = pReq;
 } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
   int32_t rspCnt = atomic_load_32(&pTask->shuffleDispatcher.waitingRspCnt);
   ASSERT(rspCnt == 0);

   SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
-  int32_t vgSz = taosArrayGetSize(vgInfo);
+  int32_t numOfVgroups = taosArrayGetSize(vgInfo);

-  SStreamDispatchReq* pReqs = taosMemoryCalloc(vgSz, sizeof(SStreamDispatchReq));
+  SStreamDispatchReq* pReqs = taosMemoryCalloc(numOfVgroups, sizeof(SStreamDispatchReq));
   if (pReqs == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     return -1;
   }

-  for (int32_t i = 0; i < vgSz; i++) {
+  for (int32_t i = 0; i < numOfVgroups; i++) {
     SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
     code = tInitStreamDispatchReq(&pReqs[i], pTask, pData->srcVgId, 0, pVgInfo->taskId, pData->type);
     if (code != TSDB_CODE_SUCCESS) {
-      goto FAIL_SHUFFLE_DISPATCH;
+      destroyDispatchMsg(pReqs, numOfVgroups);
+      return code;
     }
   }
@@ -338,50 +344,72 @@ static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pD

     // TODO: do not use broadcast
     if (pDataBlock->info.type == STREAM_DELETE_RESULT || pDataBlock->info.type == STREAM_CHECKPOINT || pDataBlock->info.type == STREAM_TRANS_STATE) {
-      for (int32_t j = 0; j < vgSz; j++) {
-        if (streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j]) < 0) {
-          goto FAIL_SHUFFLE_DISPATCH;
+      for (int32_t j = 0; j < numOfVgroups; j++) {
+        code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j]);
+        if (code != 0) {
+          destroyDispatchMsg(pReqs, numOfVgroups);
+          return code;
         }

         if (pReqs[j].blockNum == 0) {
          atomic_add_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
         }

         pReqs[j].blockNum++;
       }

       continue;
     }

-    if (streamSearchAndAddBlock(pTask, pReqs, pDataBlock, vgSz, pDataBlock->info.id.groupId) < 0) {
-      goto FAIL_SHUFFLE_DISPATCH;
+    code = streamSearchAndAddBlock(pTask, pReqs, pDataBlock, numOfVgroups, pDataBlock->info.id.groupId);
+    if(code != 0) {
+      destroyDispatchMsg(pReqs, numOfVgroups);
+      return code;
     }
   }

-  qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to %d vgroups", pTask->id.idStr,
-         pTask->info.selfChildId, numOfBlocks, vgSz);
+  pTask->msgInfo.pData = pReqs;
+}

-  for (int32_t i = 0; i < vgSz; i++) {
-    if (pReqs[i].blockNum > 0) {
+stDebug("s-task:%s build dispatch msg success, msgId:%d", pTask->id.idStr, pTask->execInfo.dispatch);
+return code;
+}
+
+static int32_t sendDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pDispatchMsg) {
+  int32_t code = 0;
+  int32_t msgId = pTask->execInfo.dispatch;
+  const char* id = pTask->id.idStr;
+
+  if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
+    int32_t vgId = pTask->fixedDispatcher.nodeId;
+    SEpSet* pEpSet = &pTask->fixedDispatcher.epSet;
+    int32_t downstreamTaskId = pTask->fixedDispatcher.taskId;
+
+    stDebug("s-task:%s (child taskId:%d) fix-dispatch %d block(s) to s-task:0x%x (vgId:%d), id:%d", id,
+            pTask->info.selfChildId, 1, downstreamTaskId, vgId, msgId);
+
+    code = doSendDispatchMsg(pTask, pDispatchMsg, vgId, pEpSet);
+  } else {
+    SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+    int32_t numOfVgroups = taosArrayGetSize(vgInfo);
+
+    stDebug("s-task:%s (child taskId:%d) start to shuffle-dispatch blocks to %d vgroup(s), msgId:%d",
+            id, pTask->info.selfChildId, numOfVgroups, msgId);
+
+    for (int32_t i = 0; i < numOfVgroups; i++) {
+      if (pDispatchMsg[i].blockNum > 0) {
         SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
-        qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d", pTask->id.idStr,
-               pTask->info.selfChildId, pReqs[i].blockNum, pVgInfo->vgId);
+        stDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d", pTask->id.idStr,
+                pTask->info.selfChildId, pDispatchMsg[i].blockNum, pVgInfo->vgId);

-        code = doSendDispatchMsg(pTask, &pReqs[i], pVgInfo->vgId, &pVgInfo->epSet);
+        code = doSendDispatchMsg(pTask, &pDispatchMsg[i], pVgInfo->vgId, &pVgInfo->epSet);
         if (code < 0) {
-          goto FAIL_SHUFFLE_DISPATCH;
+          break;
        }
      }
    }
|
|
||||||
code = 0;
|
stDebug("s-task:%s complete shuffle-dispatch blocks to all %d vnodes, msgId:%d", pTask->id.idStr, numOfVgroups, msgId);
|
||||||
|
|
||||||
FAIL_SHUFFLE_DISPATCH:
|
|
||||||
for (int32_t i = 0; i < vgSz; i++) {
|
|
||||||
taosArrayDestroyP(pReqs[i].data, taosMemoryFree);
|
|
||||||
taosArrayDestroy(pReqs[i].dataLen);
|
|
||||||
}
|
|
||||||
|
|
||||||
taosMemoryFree(pReqs);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return code;
|
return code;
|
||||||
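The hunk above splits the old build-and-send `doDispatchAllBlocks` into a build step that parks the per-vgroup request array on `pTask->msgInfo.pData` and a separate `sendDispatchMsg` step, so a failed send can later be retried without rebuilding the messages. Below is a minimal standalone sketch of that build-once, own-then-send pattern; `Req`, `buildReq`, and `destroyReqs` are illustrative stand-ins, not TDengine APIs.

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct { int vgId; int blockNum; } Req;

// Stand-ins for tInitStreamDispatchReq / destroyDispatchMsg.
static int  buildReq(Req *r, int vgId) { r->vgId = vgId; r->blockNum = 0; return 0; }
static void destroyReqs(Req *r, int n) { (void)n; free(r); }

// Build every per-vgroup request up front; on any failure release the whole
// batch, otherwise hand ownership to the caller (the role of msgInfo.pData).
static Req *buildAll(int numOfVgroups) {
  Req *reqs = calloc(numOfVgroups, sizeof(Req));
  if (reqs == NULL) return NULL;
  for (int i = 0; i < numOfVgroups; i++) {
    if (buildReq(&reqs[i], i) != 0) {
      destroyReqs(reqs, numOfVgroups);
      return NULL;
    }
  }
  return reqs;
}

int main(void) {
  Req *reqs = buildAll(4);
  if (reqs != NULL) {
    printf("built 4 requests, ready to send (and re-send) them\n");
    destroyReqs(reqs, 4);
  }
  return 0;
}
```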
@@ -389,41 +417,91 @@ static int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* p

 static void doRetryDispatchData(void* param, void* tmrId) {
   SStreamTask* pTask = param;
+  const char* id = pTask->id.idStr;
+  int32_t msgId = pTask->execInfo.dispatch;

   if (streamTaskShouldStop(&pTask->status)) {
-    int8_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1);
-    qDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
+    int8_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+    stDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
     return;
   }

   ASSERT(pTask->outputInfo.status == TASK_OUTPUT_STATUS__WAIT);

-  int32_t code = doDispatchAllBlocks(pTask, pTask->msgInfo.pData);
+  int32_t code = 0;
+  {
+    SArray* pList = taosArrayDup(pTask->msgInfo.pRetryList, NULL);
+    taosArrayClear(pTask->msgInfo.pRetryList);
+
+    SStreamDispatchReq *pReq = pTask->msgInfo.pData;
+
+    if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
+      SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+      int32_t numOfVgroups = taosArrayGetSize(vgInfo);
+
+      int32_t numOfFailed = taosArrayGetSize(pList);
+      stDebug("s-task:%s (child taskId:%d) re-try shuffle-dispatch blocks to %d vgroup(s), msgId:%d",
+              id, pTask->info.selfChildId, numOfFailed, msgId);
+
+      for (int32_t i = 0; i < numOfFailed; i++) {
+        int32_t vgId = *(int32_t*) taosArrayGet(pList, i);
+
+        for(int32_t j = 0; j < numOfVgroups; ++j) {
+          SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j);
+          if (pVgInfo->vgId == vgId) {
+            stDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d", pTask->id.idStr,
+                    pTask->info.selfChildId, pReq[j].blockNum, pVgInfo->vgId);
+
+            code = doSendDispatchMsg(pTask, &pReq[j], pVgInfo->vgId, &pVgInfo->epSet);
+            if (code < 0) {
+              break;
+            }
+          }
+        }
+      }
+
+      stDebug("s-task:%s complete re-try shuffle-dispatch blocks to all %d vnodes, msgId:%d", pTask->id.idStr, numOfFailed, msgId);
+    } else {
+      int32_t vgId = pTask->fixedDispatcher.nodeId;
+      SEpSet* pEpSet = &pTask->fixedDispatcher.epSet;
+      int32_t downstreamTaskId = pTask->fixedDispatcher.taskId;
+
+      stDebug("s-task:%s (child taskId:%d) fix-dispatch %d block(s) to s-task:0x%x (vgId:%d), id:%d", id,
+              pTask->info.selfChildId, 1, downstreamTaskId, vgId, msgId);
+
+      code = doSendDispatchMsg(pTask, pReq, vgId, pEpSet);
+    }
+  }
+
   if (code != TSDB_CODE_SUCCESS) {
     if (!streamTaskShouldStop(&pTask->status)) {
-      qDebug("s-task:%s reset the waitRspCnt to be 0 before launch retry dispatch", pTask->id.idStr);
-      atomic_store_32(&pTask->shuffleDispatcher.waitingRspCnt, 0);
+      // stDebug("s-task:%s reset the waitRspCnt to be 0 before launch retry dispatch", pTask->id.idStr);
+      // atomic_store_32(&pTask->shuffleDispatcher.waitingRspCnt, 0);
       if (streamTaskShouldPause(&pTask->status)) {
-        streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS * 10);
+        streamRetryDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS * 10);
       } else {
-        streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
+        streamRetryDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS);
       }
     } else {
-      int32_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1);
-      qDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
+      int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+      stDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref);
     }
   } else {
-    int8_t ref = atomic_sub_fetch_8(&pTask->status.timerActive, 1);
-    qDebug("s-task:%s send success, jump out of timer, ref:%d", pTask->id.idStr, ref);
+    int8_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+    stDebug("s-task:%s send success, jump out of timer, ref:%d", pTask->id.idStr, ref);
   }
 }

-void streamRetryDispatchStreamBlock(SStreamTask* pTask, int64_t waitDuration) {
-  qWarn("s-task:%s dispatch data in %" PRId64 "ms, in timer", pTask->id.idStr, waitDuration);
-  if (pTask->launchTaskTimer != NULL) {
-    taosTmrReset(doRetryDispatchData, waitDuration, pTask, streamEnv.timer, &pTask->launchTaskTimer);
+void streamRetryDispatchData(SStreamTask* pTask, int64_t waitDuration) {
+  pTask->msgInfo.retryCount++;
+
+  stWarn("s-task:%s retry send dispatch data in %" PRId64 "ms, in timer msgId:%d, retryTimes:%d", pTask->id.idStr,
+         waitDuration, pTask->execInfo.dispatch, pTask->msgInfo.retryCount);
+
+  if (pTask->msgInfo.pTimer != NULL) {
+    taosTmrReset(doRetryDispatchData, waitDuration, pTask, streamEnv.timer, &pTask->msgInfo.pTimer);
   } else {
-    pTask->launchTaskTimer = taosTmrStart(doRetryDispatchData, waitDuration, pTask, streamEnv.timer);
+    pTask->msgInfo.pTimer = taosTmrStart(doRetryDispatchData, waitDuration, pTask, streamEnv.timer);
   }
 }
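In the new `doRetryDispatchData`, the callback holds a reference in `status.timerActive` and only drops it when the task is stopping or the send finally succeeds; otherwise it re-arms the timer via `streamRetryDispatchData`. A compilable toy version of that reference-counted retry callback, with a loop standing in for timer ticks (all names below are stand-ins, not TDengine APIs):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  atomic_int timerActive;  // counts outstanding timer callbacks
  bool shouldStop;
  int  retryCount;
} Task;

// Pretend the send succeeds on the third attempt.
static bool trySend(Task *t) { return ++t->retryCount >= 3; }

static void onRetryTimer(Task *t) {
  if (t->shouldStop) {
    int ref = atomic_fetch_sub(&t->timerActive, 1) - 1;
    printf("abort from timer, ref:%d\n", ref);
    return;
  }
  if (trySend(t)) {
    int ref = atomic_fetch_sub(&t->timerActive, 1) - 1;
    printf("send success, jump out of timer, ref:%d\n", ref);
  }
  // else: the real code re-arms the timer and keeps holding the reference
}

int main(void) {
  Task t = { .timerActive = 1, .shouldStop = false, .retryCount = 0 };
  while (atomic_load(&t.timerActive) > 0) onRetryTimer(&t);  // simulate ticks
  return 0;
}
```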
@@ -500,57 +578,64 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
   const char* id = pTask->id.idStr;
   int32_t numOfElems = streamQueueGetNumOfItems(pTask->outputInfo.queue);
   if (numOfElems > 0) {
-    double size = SIZE_IN_MB(taosQueueMemorySize(pTask->outputInfo.queue->pQueue));
-    qDebug("s-task:%s start to dispatch intermediate block to downstream, elem in outputQ:%d, size:%.2fMiB", id, numOfElems, size);
+    double size = SIZE_IN_MiB(taosQueueMemorySize(pTask->outputInfo.queue->pQueue));
+    stDebug("s-task:%s start to dispatch intermediate block to downstream, elem in outputQ:%d, size:%.2fMiB", id, numOfElems, size);
   }

   // to make sure only one dispatch is running
   int8_t old =
       atomic_val_compare_exchange_8(&pTask->outputInfo.status, TASK_OUTPUT_STATUS__NORMAL, TASK_OUTPUT_STATUS__WAIT);
   if (old != TASK_OUTPUT_STATUS__NORMAL) {
-    qDebug("s-task:%s wait for dispatch rsp, not dispatch now, output status:%d", id, old);
+    stDebug("s-task:%s wait for dispatch rsp, not dispatch now, output status:%d", id, old);
     return 0;
   }

   ASSERT(pTask->msgInfo.pData == NULL);
-  qDebug("s-task:%s start to dispatch msg, set output status:%d", id, pTask->outputInfo.status);
+  stDebug("s-task:%s start to dispatch msg, set output status:%d", id, pTask->outputInfo.status);

   SStreamDataBlock* pBlock = streamQueueNextItem(pTask->outputInfo.queue);
   if (pBlock == NULL) {
     atomic_store_8(&pTask->outputInfo.status, TASK_OUTPUT_STATUS__NORMAL);
-    qDebug("s-task:%s not dispatch since no elems in outputQ, output status:%d", id, pTask->outputInfo.status);
+    stDebug("s-task:%s not dispatch since no elems in outputQ, output status:%d", id, pTask->outputInfo.status);
     return 0;
   }

-  pTask->msgInfo.pData = pBlock;
   ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK || pBlock->type == STREAM_INPUT__CHECKPOINT_TRIGGER ||
          pBlock->type == STREAM_INPUT__TRANS_STATE);

   int32_t retryCount = 0;
+  pTask->execInfo.dispatch += 1;
+  pTask->msgInfo.startTs = taosGetTimestampMs();
+
+  int32_t code = doBuildDispatchMsg(pTask, pBlock);
+  if (code == 0) {
+    destroyStreamDataBlock(pBlock);
+  } else { // todo handle build dispatch msg failed
+  }

   while (1) {
-    int32_t code = doDispatchAllBlocks(pTask, pBlock);
+    code = sendDispatchMsg(pTask, pTask->msgInfo.pData);
     if (code == TSDB_CODE_SUCCESS) {
       break;
     }

-    qDebug("s-task:%s failed to dispatch msg to downstream, code:%s, output status:%d, retry cnt:%d", id,
-           tstrerror(terrno), pTask->outputInfo.status, retryCount);
+    stDebug("s-task:%s failed to dispatch msg:%d to downstream, code:%s, output status:%d, retry cnt:%d", id,
+            pTask->execInfo.dispatch, tstrerror(terrno), pTask->outputInfo.status, retryCount);

     // todo deal with only partially success dispatch case
     atomic_store_32(&pTask->shuffleDispatcher.waitingRspCnt, 0);
     if (terrno == TSDB_CODE_APP_IS_STOPPING) { // in case of this error, do not retry anymore
-      destroyStreamDataBlock(pTask->msgInfo.pData);
+      destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
       pTask->msgInfo.pData = NULL;
       return code;
     }

     if (++retryCount > MAX_CONTINUE_RETRY_COUNT) { // add to timer to retry
-      int8_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);
-      qDebug("s-task:%s failed to dispatch msg to downstream for %d times, code:%s, add timer to retry in %dms, ref:%d",
-             pTask->id.idStr, retryCount, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS, ref);
-      streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
+      int8_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
+      stDebug("s-task:%s failed to dispatch msg to downstream for %d times, code:%s, add timer to retry in %dms, ref:%d",
+              pTask->id.idStr, retryCount, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS, ref);
+      streamRetryDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS);
       break;
     }
   }
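The `atomic_val_compare_exchange_8` guard above is what keeps at most one dispatch in flight per task: only the caller that flips the output status from NORMAL to WAIT proceeds, and the response handler flips it back. The same guard in portable C11, as a sketch:

```c
#include <stdatomic.h>
#include <stdio.h>

enum { OUTPUT_NORMAL = 0, OUTPUT_WAIT = 1 };

static atomic_int outputStatus = OUTPUT_NORMAL;

// Returns 1 if this caller won the right to dispatch, 0 if a dispatch is
// already in flight (mirrors the compare-exchange guard in the hunk above).
static int tryStartDispatch(void) {
  int expected = OUTPUT_NORMAL;
  return atomic_compare_exchange_strong(&outputStatus, &expected, OUTPUT_WAIT);
}

int main(void) {
  printf("first caller dispatches: %d\n", tryStartDispatch());   // 1
  printf("second caller dispatches: %d\n", tryStartDispatch());  // 0
  atomic_store(&outputStatus, OUTPUT_NORMAL);  // the rsp handler resets it
  return 0;
}
```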
@@ -569,15 +654,15 @@ int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask) {

   // serialize
   if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
-    req.downstreamTaskId = pTask->fixedEpDispatcher.taskId;
+    req.downstreamTaskId = pTask->fixedDispatcher.taskId;
     pTask->notReadyTasks = 1;
-    doDispatchScanHistoryFinishMsg(pTask, &req, pTask->fixedEpDispatcher.nodeId, &pTask->fixedEpDispatcher.epSet);
+    doDispatchScanHistoryFinishMsg(pTask, &req, pTask->fixedDispatcher.nodeId, &pTask->fixedDispatcher.epSet);
   } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
     SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
     int32_t numOfVgs = taosArrayGetSize(vgInfo);
     pTask->notReadyTasks = numOfVgs;

-    qDebug("s-task:%s send scan-history data complete msg to downstream (shuffle-dispatch) %d tasks, status:%s", pTask->id.idStr,
-           numOfVgs, streamGetTaskStatusStr(pTask->status.taskStatus));
+    stDebug("s-task:%s send scan-history data complete msg to downstream (shuffle-dispatch) %d tasks, status:%s", pTask->id.idStr,
+            numOfVgs, streamGetTaskStatusStr(pTask->status.taskStatus));
     for (int32_t i = 0; i < numOfVgs; i++) {
       SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
@@ -585,7 +670,7 @@ int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask) {
       doDispatchScanHistoryFinishMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet);
     }
   } else {
-    qDebug("s-task:%s no downstream tasks, invoke scan-history finish rsp directly", pTask->id.idStr);
+    stDebug("s-task:%s no downstream tasks, invoke scan-history finish rsp directly", pTask->id.idStr);
     streamProcessScanHistoryFinishRsp(pTask);
   }

@@ -601,12 +686,12 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) {
     SStreamChkptReadyInfo* pInfo = taosArrayGet(pTask->pReadyMsgList, i);
     tmsgSendReq(&pInfo->upstreamNodeEpset, &pInfo->msg);

-    qDebug("s-task:%s level:%d checkpoint ready msg sent to upstream:0x%x", pTask->id.idStr, pTask->info.taskLevel,
-           pInfo->upStreamTaskId);
+    stDebug("s-task:%s level:%d checkpoint ready msg sent to upstream:0x%x", pTask->id.idStr, pTask->info.taskLevel,
+            pInfo->upStreamTaskId);
   }

   taosArrayClear(pTask->pReadyMsgList);
-  qDebug("s-task:%s level:%d checkpoint ready msg sent to all %d upstreams", pTask->id.idStr, pTask->info.taskLevel, num);
+  stDebug("s-task:%s level:%d checkpoint ready msg sent to all %d upstreams", pTask->id.idStr, pTask->info.taskLevel, num);

   return TSDB_CODE_SUCCESS;
 }

@@ -619,7 +704,7 @@ int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask) {
     tmsgSendRsp(&pInfo->msg);

     taosArrayClear(pTask->pReadyMsgList);
-    qDebug("s-task:%s level:%d source checkpoint completed msg sent to mnode", pTask->id.idStr, pTask->info.taskLevel);
+    stDebug("s-task:%s level:%d source checkpoint completed msg sent to mnode", pTask->id.idStr, pTask->info.taskLevel);

   return TSDB_CODE_SUCCESS;
 }

@@ -691,7 +776,7 @@ int32_t doDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamScanHist

   tmsgSendReq(pEpSet, &msg);
   const char* pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
-  qDebug("s-task:%s status:%s dispatch scan-history finish msg to taskId:0x%x (vgId:%d)", pTask->id.idStr, pStatus,
-         pReq->downstreamTaskId, vgId);
+  stDebug("s-task:%s status:%s dispatch scan-history finish msg to taskId:0x%x (vgId:%d)", pTask->id.idStr, pStatus,
+          pReq->downstreamTaskId, vgId);
   return 0;
 }

@@ -725,7 +810,7 @@ int32_t doSendDispatchMsg(SStreamTask* pTask, const SStreamDispatchReq* pReq, in
   tEncoderClear(&encoder);

   initRpcMsg(&msg, pTask->msgInfo.msgType, buf, tlen + sizeof(SMsgHead));
-  qDebug("s-task:%s dispatch msg to taskId:0x%x vgId:%d data msg", pTask->id.idStr, pReq->taskId, vgId);
+  stDebug("s-task:%s dispatch msg to taskId:0x%x vgId:%d data msg", pTask->id.idStr, pReq->taskId, vgId);

   return tmsgSendReq(pEpSet, &msg);

@@ -764,7 +849,6 @@ int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInf
   }

   ((SMsgHead*)pBuf)->vgId = htonl(pReq->mnodeId);
-
   void* abuf = POINTER_SHIFT(pBuf, sizeof(SMsgHead));

   tEncoderInit(&encoder, (uint8_t*)abuf, len);

@@ -786,7 +870,7 @@ int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHa
   }

   taosArrayPush(pTask->pReadyMsgList, &info);
-  qDebug("s-task:%s add checkpoint source rsp msg, total:%d", pTask->id.idStr, (int32_t)taosArrayGetSize(pTask->pReadyMsgList));
+  stDebug("s-task:%s add checkpoint source rsp msg, total:%d", pTask->id.idStr, (int32_t)taosArrayGetSize(pTask->pReadyMsgList));
   return TSDB_CODE_SUCCESS;
 }

@@ -836,8 +920,8 @@ int32_t streamAddCheckpointReadyMsg(SStreamTask* pTask, int32_t upstreamTaskId,
   initRpcMsg(&info.msg, TDMT_STREAM_TASK_CHECKPOINT_READY, buf, tlen + sizeof(SMsgHead));
   info.msg.info.noResp = 1; // refactor later.

-  qDebug("s-task:%s (level:%d) prepare checkpoint ready msg to upstream s-task:0x%" PRIx64 ":0x%x (vgId:%d) idx:%d",
-         pTask->id.idStr, pTask->info.taskLevel, req.streamId, req.upstreamTaskId, req.downstreamNodeId, index);
+  stDebug("s-task:%s (level:%d) prepare checkpoint ready msg to upstream s-task:0x%" PRIx64 ":0x%x (vgId:%d) idx:%d",
+          pTask->id.idStr, pTask->info.taskLevel, req.streamId, req.upstreamTaskId, req.upstreamNodeId, index);

   if (pTask->pReadyMsgList == NULL) {
     pTask->pReadyMsgList = taosArrayInit(4, sizeof(SStreamChkptReadyInfo));

@@ -924,7 +1008,7 @@ int32_t streamAddEndScanHistoryMsg(SStreamTask* pTask, SRpcHandleInfo* pRpcInfo,
   taosThreadMutexUnlock(&pTask->lock);

   int32_t num = taosArrayGetSize(pTask->pRspMsgList);
-  qDebug("s-task:%s add scan history finish rsp msg for task:0x%x, total:%d", pTask->id.idStr, pReq->upstreamTaskId,
-         num);
+  stDebug("s-task:%s add scan history finish rsp msg for task:0x%x, total:%d", pTask->id.idStr, pReq->upstreamTaskId,
+          num);
   return TSDB_CODE_SUCCESS;
 }

@@ -932,103 +1016,36 @@ int32_t streamAddEndScanHistoryMsg(SStreamTask* pTask, SRpcHandleInfo* pRpcInfo,
 int32_t streamNotifyUpstreamContinue(SStreamTask* pTask) {
   ASSERT(pTask->info.taskLevel == TASK_LEVEL__AGG || pTask->info.taskLevel == TASK_LEVEL__SINK);

+  const char* id = pTask->id.idStr;
+  int32_t level = pTask->info.taskLevel;
+
   int32_t num = taosArrayGetSize(pTask->pRspMsgList);
   for (int32_t i = 0; i < num; ++i) {
     SStreamContinueExecInfo* pInfo = taosArrayGet(pTask->pRspMsgList, i);
     tmsgSendRsp(&pInfo->msg);

-    qDebug("s-task:%s level:%d notify upstream:0x%x to continue process data in WAL", pTask->id.idStr, pTask->info.taskLevel,
-           pInfo->taskId);
+    stDebug("s-task:%s level:%d notify upstream:0x%x continuing scan data in WAL", id, level, pInfo->taskId);
   }

   taosArrayClear(pTask->pRspMsgList);
-  qDebug("s-task:%s level:%d checkpoint ready msg sent to all %d upstreams", pTask->id.idStr, pTask->info.taskLevel,
-         num);
+  stDebug("s-task:%s level:%d continue process msg sent to all %d upstreams", id, level, num);
   return 0;
 }

-int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) {
-  const char* id = pTask->id.idStr;
-
-  if (code != TSDB_CODE_SUCCESS) {
-    // dispatch message failed: network error, or node not available.
-    // in case of the input queue is full, the code will be TSDB_CODE_SUCCESS, the and pRsp>inputStatus will be set
-    // flag. here we need to retry dispatch this message to downstream task immediately. handle the case the failure
-    // happened too fast.
-    // todo handle the shuffle dispatch failure
-    if (code == TSDB_CODE_STREAM_TASK_NOT_EXIST) { // destination task does not exist, not retry anymore
-      qWarn("s-task:%s failed to dispatch msg to task:0x%x, no retry, since it is destroyed already", id, pRsp->downstreamTaskId);
-    } else {
-      qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", id, pRsp->downstreamTaskId,
-             tstrerror(code), ++pTask->msgInfo.retryCount);
-      int32_t ret = doDispatchAllBlocks(pTask, pTask->msgInfo.pData);
-      if (ret != TSDB_CODE_SUCCESS) {
-      }
-    }
-
-    return TSDB_CODE_SUCCESS;
-  }
-
-  qDebug("s-task:%s recv dispatch rsp from 0x%x, downstream task input status:%d code:%d", id, pRsp->downstreamTaskId,
-         pRsp->inputStatus, code);
-
-  // there are other dispatch message not response yet
-  if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
-    int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
-    qDebug("s-task:%s is shuffle, left waiting rsp %d", id, leftRsp);
-    if (leftRsp > 0) {
-      return 0;
-    }
-  }
-
-  // transtate msg has been sent to downstream successfully. let's transfer the fill-history task state
-  SStreamDataBlock* p = pTask->msgInfo.pData;
-  if (p->type == STREAM_INPUT__TRANS_STATE) {
-    qDebug("s-task:%s dispatch transtate msg to downstream successfully, start to transfer state", id);
-    ASSERT(pTask->info.fillHistory == 1);
-    code = streamTransferStateToStreamTask(pTask);
-    if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens
-    }
-
-    // now ready for next data output
-    atomic_store_8(&pTask->outputInfo.status, TASK_OUTPUT_STATUS__NORMAL);
-    return TSDB_CODE_SUCCESS;
-  }
-
-  pTask->msgInfo.retryCount = 0;
-  ASSERT(pTask->outputInfo.status == TASK_OUTPUT_STATUS__WAIT);
-
-  qDebug("s-task:%s output status is set to:%d", id, pTask->outputInfo.status);
-
-  // the input queue of the (down stream) task that receive the output data is full,
-  // so the TASK_INPUT_STATUS_BLOCKED is rsp
-  if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
-    pTask->inputInfo.status = TASK_INPUT_STATUS__BLOCKED; // block the input of current task, to push pressure to upstream
-    double el = 0;
-    if (pTask->msgInfo.blockingTs == 0) {
-      pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time
-    } else {
-      el = (taosGetTimestampMs() - pTask->msgInfo.blockingTs) / 1000.0;
-    }
-
-    int8_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);
-    qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64
-           " wait for %dms and retry dispatch data, total wait:%.2fSec ref:%d",
-           id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS, el, ref);
-    streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS);
-  } else { // pipeline send data in output queue
-    // this message has been sent successfully, let's try next one.
-    destroyStreamDataBlock(pTask->msgInfo.pData);
-    pTask->msgInfo.pData = NULL;
-
-    if (pTask->msgInfo.blockingTs != 0) {
-      int64_t el = taosGetTimestampMs() - pTask->msgInfo.blockingTs;
-      qDebug("s-task:%s downstream task:0x%x resume to normal from inputQ blocking, blocking time:%" PRId64 "ms", id,
-             pRsp->downstreamTaskId, el);
-      pTask->msgInfo.blockingTs = 0;
-
-      // put data into inputQ of current task is also allowed
-      pTask->inputInfo.status = TASK_INPUT_STATUS__NORMAL;
-    }
+// this message has been sent successfully, let's try next one.
+static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId) {
+  destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
+  pTask->msgInfo.pData = NULL;
+
+  int64_t el = taosGetTimestampMs() - pTask->msgInfo.startTs;
+
+  // put data into inputQ of current task is also allowed
+  if (pTask->inputInfo.status == TASK_INPUT_STATUS__BLOCKED) {
+    pTask->inputInfo.status = TASK_INPUT_STATUS__NORMAL;
+    stDebug("s-task:%s downstream task:0x%x resume to normal from inputQ blocking, blocking time:%" PRId64 "ms",
+            pTask->id.idStr, downstreamId, el);
+  } else {
+    stDebug("s-task:%s dispatch completed, elapsed time:%"PRId64"ms", pTask->id.idStr, el);
   }

   // now ready for next data output
@@ -1036,6 +1053,114 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i

   // otherwise, continue dispatch the first block to down stream task in pipeline
   streamDispatchStreamBlock(pTask);
+  return 0;
+}
+
+int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) {
+  const char* id = pTask->id.idStr;
+  int32_t vgId = pTask->pMeta->vgId;
+  int32_t msgId = pTask->execInfo.dispatch;
+
+  if ((pTask->pMeta->role == NODE_ROLE_FOLLOWER) || (pTask->status.downstreamReady != 1)) {
+    stError("s-task:%s vgId:%d is follower or task just re-launched, not handle the dispatch rsp, discard it", id, vgId);
+    return TSDB_CODE_STREAM_TASK_NOT_EXIST;
+  }
+
+  if ((pRsp->msgId != msgId) || (pRsp->stage != pTask->pMeta->stage)) {
+    stError("s-task:%s vgId:%d not expect rsp, expected: msgId:%d, stage:%" PRId64 " actual msgId:%d, stage:%" PRId64
+            " discard it",
+            id, vgId, msgId, pTask->pMeta->stage, pRsp->msgId, pRsp->stage);
+    return TSDB_CODE_INVALID_MSG;
+  }
+
+  if (code != TSDB_CODE_SUCCESS) {
+    // dispatch message failed: network error, or node not available.
+    // in case of the input queue is full, the code will be TSDB_CODE_SUCCESS, the and pRsp->inputStatus will be set
+    // flag. Here we need to retry dispatch this message to downstream task immediately. handle the case the failure
+    // happened too fast.
+    if (code == TSDB_CODE_STREAM_TASK_NOT_EXIST) { // destination task does not exist, not retry anymore
+      stError("s-task:%s failed to dispatch msg to task:0x%x(vgId:%d), msgId:%d no retry, since task destroyed already", id,
+              pRsp->downstreamTaskId, pRsp->downstreamNodeId, msgId);
+    } else {
+      stError("s-task:%s failed to dispatch msgId:%d to task:0x%x(vgId:%d), code:%s, add to retry list", id, msgId,
+              pRsp->downstreamTaskId, pRsp->downstreamNodeId, tstrerror(code));
+      taosThreadMutexLock(&pTask->lock);
+      taosArrayPush(pTask->msgInfo.pRetryList, &pRsp->downstreamNodeId);
+      taosThreadMutexUnlock(&pTask->lock);
+    }
+
+  } else { // code == 0
+    if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
+      pTask->inputInfo.status = TASK_INPUT_STATUS__BLOCKED;
+      // block the input of current task, to push pressure to upstream
+      taosThreadMutexLock(&pTask->lock);
+      taosArrayPush(pTask->msgInfo.pRetryList, &pRsp->downstreamNodeId);
+      taosThreadMutexUnlock(&pTask->lock);
+
+      stError("s-task:%s inputQ of downstream task:0x%x(vgId:%d) is full, wait for %dms and retry dispatch data", id,
+              pRsp->downstreamTaskId, pRsp->downstreamNodeId, DISPATCH_RETRY_INTERVAL_MS);
+    } else if (pRsp->inputStatus == TASK_INPUT_STATUS__REFUSED) {
+      stError("s-task:%s downstream task:0x%x(vgId:%d) refused the dispatch msg, treat it as success", id,
+              pRsp->downstreamTaskId, pRsp->downstreamNodeId);
+    }
+  }
+
+  int32_t leftRsp = 0;
+  if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
+    leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
+    ASSERT(leftRsp >= 0);
+
+    if (leftRsp > 0) {
+      stDebug( "s-task:%s recv dispatch rsp, msgId:%d from 0x%x(vgId:%d), downstream task input status:%d code:%d, waiting for %d rsp",
+          id, msgId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->inputStatus, code, leftRsp);
+    } else {
+      stDebug(
+          "s-task:%s recv dispatch rsp, msgId:%d from 0x%x(vgId:%d), downstream task input status:%d code:%d, all rsp",
+          id, msgId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->inputStatus, code);
+    }
+  } else {
+    stDebug("s-task:%s recv fix-dispatch rsp, msgId:%d from 0x%x(vgId:%d), downstream task input status:%d code:%d",
+            id, msgId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->inputStatus, code);
+  }
+
+  ASSERT(leftRsp >= 0);
+
+  // all msg rsp already, continue
+  if (leftRsp == 0) {
+    ASSERT(pTask->outputInfo.status == TASK_OUTPUT_STATUS__WAIT);
+    stDebug("s-task:%s output status is set to:%d", id, pTask->outputInfo.status);
+
+    // we need to re-try send dispatch msg to downstream tasks
+    int32_t numOfFailed = taosArrayGetSize(pTask->msgInfo.pRetryList);
+    if (numOfFailed > 0) {
+      if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
+        atomic_store_32(&pTask->shuffleDispatcher.waitingRspCnt, numOfFailed);
+        stDebug("s-task:%s waiting rsp set to be %d", id, pTask->shuffleDispatcher.waitingRspCnt);
+      }
+
+      int8_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
+      stDebug("s-task:%s failed to dispatch msg to downstream code:%s, add timer to retry in %dms, ref:%d",
+              pTask->id.idStr, tstrerror(terrno), DISPATCH_RETRY_INTERVAL_MS, ref);
+
+      streamRetryDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS);
+    } else { // this message has been sent successfully, let's try next one.
+      pTask->msgInfo.retryCount = 0;
+
+      // transtate msg has been sent to downstream successfully. let's transfer the fill-history task state
+      if (pTask->msgInfo.dispatchMsgType == STREAM_INPUT__TRANS_STATE) {
+        stDebug("s-task:%s dispatch transtate msgId:%d to downstream successfully, start to transfer state", id, msgId);
+        ASSERT(pTask->info.fillHistory == 1);
+
+        code = streamTransferStateToStreamTask(pTask);
+        if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens
+        }
+
+        // now ready for next data output
+        atomic_store_8(&pTask->outputInfo.status, TASK_OUTPUT_STATUS__NORMAL);
+      } else {
+        handleDispatchSuccessRsp(pTask, pRsp->downstreamTaskId);
+      }
+    }
   }

   return 0;
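The reworked response path pushes each failed downstream node id onto `msgInfo.pRetryList` under the task lock, and the retry timer later duplicates and clears the list so only the failed vgroups are re-sent. A self-contained sketch of that copy-and-clear pattern, using a fixed-size array in place of `SArray` (illustrative names throughout):

```c
#include <pthread.h>
#include <stdio.h>

#define MAX_NODES 16

typedef struct {
  pthread_mutex_t lock;
  int failedNodes[MAX_NODES];
  int numOfFailed;
} RetryList;

// Response handler side: record a failed downstream node under the lock.
static void addFailed(RetryList *l, int nodeId) {
  pthread_mutex_lock(&l->lock);
  if (l->numOfFailed < MAX_NODES) l->failedNodes[l->numOfFailed++] = nodeId;
  pthread_mutex_unlock(&l->lock);
}

// Timer side: copy-and-clear, like taosArrayDup followed by taosArrayClear.
static int drainFailed(RetryList *l, int *out) {
  pthread_mutex_lock(&l->lock);
  int n = l->numOfFailed;
  for (int i = 0; i < n; i++) out[i] = l->failedNodes[i];
  l->numOfFailed = 0;
  pthread_mutex_unlock(&l->lock);
  return n;
}

int main(void) {
  RetryList l = { .lock = PTHREAD_MUTEX_INITIALIZER, .numOfFailed = 0 };
  addFailed(&l, 3);
  addFailed(&l, 7);
  int pending[MAX_NODES];
  int n = drainFailed(&l, pending);
  for (int i = 0; i < n; i++) printf("retry dispatch to vgId:%d\n", pending[i]);
  return 0;
}
```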
|
@ -66,13 +66,13 @@ static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray*
|
||||||
|
|
||||||
SStreamDataBlock* pStreamBlocks = createStreamBlockFromResults(pItem, pTask, size, pRes);
|
SStreamDataBlock* pStreamBlocks = createStreamBlockFromResults(pItem, pTask, size, pRes);
|
||||||
if (pStreamBlocks == NULL) {
|
if (pStreamBlocks == NULL) {
|
||||||
qError("s-task:%s failed to create result stream data block, code:%s", pTask->id.idStr, tstrerror(terrno));
|
stError("s-task:%s failed to create result stream data block, code:%s", pTask->id.idStr, tstrerror(terrno));
|
||||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||||
return TSDB_CODE_OUT_OF_MEMORY;
|
return TSDB_CODE_OUT_OF_MEMORY;
|
||||||
}
|
}
|
||||||
|
|
||||||
qDebug("s-task:%s dump stream result data blocks, num:%d, size:%.2fMiB", pTask->id.idStr, numOfBlocks,
|
stDebug("s-task:%s dump stream result data blocks, num:%d, size:%.2fMiB", pTask->id.idStr, numOfBlocks,
|
||||||
SIZE_IN_MB(size));
|
SIZE_IN_MiB(size));
|
||||||
|
|
||||||
int32_t code = doOutputResultBlockImpl(pTask, pStreamBlocks);
|
int32_t code = doOutputResultBlockImpl(pTask, pStreamBlocks);
|
||||||
if (code != TSDB_CODE_SUCCESS) { // back pressure and record position
|
if (code != TSDB_CODE_SUCCESS) { // back pressure and record position
|
||||||
|
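`SIZE_IN_MB` becomes `SIZE_IN_MiB` here, matching the binary units the log line already prints. Plausible definitions of the two helpers, assumed rather than copied from the real headers:

```c
#include <stdio.h>

// Assumed definitions for the SIZE_IN_KiB / SIZE_IN_MiB helpers referenced
// in the hunks above (binary units, hence the Ki/Mi naming).
#define SIZE_IN_KiB(_v) ((_v) / 1024.0)
#define SIZE_IN_MiB(_v) ((_v) / (1024.0 * 1024.0))

int main(void) {
  long bytes = 5 * 1024 * 1024 + 512 * 1024;
  printf("%.2fKiB, %.2fMiB\n", SIZE_IN_KiB(bytes), SIZE_IN_MiB(bytes));
  return 0;
}
```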
@@ -109,7 +109,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
   }

   if (pTask->inputInfo.status == TASK_INPUT_STATUS__BLOCKED) {
-    qWarn("s-task:%s downstream task inputQ blocked, idle for 1sec and retry exec task", pTask->id.idStr);
+    stWarn("s-task:%s downstream task inputQ blocked, idle for 1sec and retry exec task", pTask->id.idStr);
     taosMsleep(1000);
     continue;
   }

@@ -121,7 +121,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
     resetTaskInfo(pExecutor);
   }

-  qError("unexpected stream execution, s-task:%s since %s", pTask->id.idStr, tstrerror(code));
+  stError("unexpected stream execution, s-task:%s since %s", pTask->id.idStr, tstrerror(code));
   continue;
 }

@@ -137,7 +137,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
   taosArrayPush(pRes, &block);
   numOfBlocks += 1;

-  qDebug("s-task:%s(child %d) retrieve process completed, reqId:0x%" PRIx64 " dump results", pTask->id.idStr,
-         pTask->info.selfChildId, pRetrieveBlock->reqId);
+  stDebug("s-task:%s(child %d) retrieve process completed, reqId:0x%" PRIx64 " dump results", pTask->id.idStr,
+          pTask->info.selfChildId, pRetrieveBlock->reqId);
 }

@@ -162,8 +162,8 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i

   taosArrayPush(pRes, &block);

-  qDebug("s-task:%s (child %d) executed and get %d result blocks, size:%.2fMiB", pTask->id.idStr,
-         pTask->info.selfChildId, numOfBlocks, SIZE_IN_MB(size));
+  stDebug("s-task:%s (child %d) executed and get %d result blocks, size:%.2fMiB", pTask->id.idStr,
+          pTask->info.selfChildId, numOfBlocks, SIZE_IN_MiB(size));

   // current output should be dispatched to down stream nodes
   if (numOfBlocks >= STREAM_RESULT_DUMP_THRESHOLD || size >= STREAM_RESULT_DUMP_SIZE_THRESHOLD) {

@@ -192,18 +192,16 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
 int32_t streamScanHistoryData(SStreamTask* pTask) {
   ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE);

-  int32_t size = 0;
   int32_t code = TSDB_CODE_SUCCESS;
   void* exec = pTask->exec.pExecutor;
   bool finished = false;
-  int32_t outputBatchSize = 100;

   qSetStreamOpOpen(exec);

   while (!finished) {
     if (streamTaskShouldPause(&pTask->status)) {
-      double el = (taosGetTimestampMs() - pTask->taskExecInfo.step1Start) / 1000.0;
-      qDebug("s-task:%s paused from the scan-history task, elapsed time:%.2fsec", pTask->id.idStr, el);
+      double el = (taosGetTimestampMs() - pTask->execInfo.step1Start) / 1000.0;
+      stDebug("s-task:%s paused from the scan-history task, elapsed time:%.2fsec", pTask->id.idStr, el);
       break;
     }

@@ -213,6 +211,7 @@ int32_t streamScanHistoryData(SStreamTask* pTask) {
       return -1;
     }

+    int32_t size = 0;
     int32_t numOfBlocks = 0;
     while (1) {
       if (streamTaskShouldStop(&pTask->status)) {

@@ -221,7 +220,7 @@ int32_t streamScanHistoryData(SStreamTask* pTask) {
       }

       if (pTask->inputInfo.status == TASK_INPUT_STATUS__BLOCKED) {
-        qDebug("s-task:%s inputQ is blocked, wait for 10sec and retry", pTask->id.idStr);
+        stDebug("s-task:%s inputQ is blocked, wait for 10sec and retry", pTask->id.idStr);
         taosMsleep(10000);
         continue;
       }

@@ -230,7 +229,7 @@ int32_t streamScanHistoryData(SStreamTask* pTask) {
       uint64_t ts = 0;
       code = qExecTask(exec, &output, &ts);
       if (code != TSDB_CODE_TSC_QUERY_KILLED && code != TSDB_CODE_SUCCESS) {
-        qError("%s scan-history data error occurred code:%s, continue scan", pTask->id.idStr, tstrerror(code));
+        stError("%s scan-history data error occurred code:%s, continue scan", pTask->id.idStr, tstrerror(code));
         continue;
       }

@@ -247,9 +246,10 @@ int32_t streamScanHistoryData(SStreamTask* pTask) {

       size += blockDataGetSize(output) + sizeof(SSDataBlock) + sizeof(SColumnInfoData) * blockDataGetNumOfCols(&block);

-      if ((++numOfBlocks) >= outputBatchSize || size >= STREAM_RESULT_DUMP_SIZE_THRESHOLD) {
-        qDebug("s-task:%s scan exec numOfBlocks:%d, output num-limit:%d, size-limit:%d reached", pTask->id.idStr, numOfBlocks,
-               outputBatchSize, STREAM_RESULT_DUMP_SIZE_THRESHOLD);
+      if ((++numOfBlocks) >= STREAM_RESULT_DUMP_THRESHOLD || size >= STREAM_RESULT_DUMP_SIZE_THRESHOLD) {
+        stDebug("s-task:%s scan exec numOfBlocks:%d, size:%.2fKiB output num-limit:%d, size-limit:%.2fKiB reached",
+                pTask->id.idStr, numOfBlocks, SIZE_IN_KiB(size), STREAM_RESULT_DUMP_THRESHOLD,
+                SIZE_IN_KiB(STREAM_RESULT_DUMP_SIZE_THRESHOLD));
         break;
       }
     }
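The scan loop now flushes on whichever trips first: the shared `STREAM_RESULT_DUMP_THRESHOLD` block count or the byte-size threshold. A toy version of that dual-threshold batching; the constant values below are assumptions, not the engine's actual settings:

```c
#include <stdio.h>

// Assumed values for illustration only.
#define STREAM_RESULT_DUMP_THRESHOLD       100
#define STREAM_RESULT_DUMP_SIZE_THRESHOLD  (1048576 * 1)

int main(void) {
  int numOfBlocks = 0;
  long size = 0;
  for (int i = 0; i < 1000; i++) {
    size += 40000;  // pretend each result block is ~40KB
    if (++numOfBlocks >= STREAM_RESULT_DUMP_THRESHOLD || size >= STREAM_RESULT_DUMP_SIZE_THRESHOLD) {
      // here the real code builds a SStreamDataBlock and dumps it downstream
      printf("flush %d blocks, %.2fKiB\n", numOfBlocks, size / 1024.0);
      numOfBlocks = 0;
      size = 0;
    }
  }
  return 0;
}
```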
@@ -258,11 +258,8 @@ int32_t streamScanHistoryData(SStreamTask* pTask) {
       SStreamDataBlock* pStreamBlocks = createStreamBlockFromResults(NULL, pTask, size, pRes);
       code = doOutputResultBlockImpl(pTask, pStreamBlocks);
       if (code != TSDB_CODE_SUCCESS) {
-        destroyStreamDataBlock(pStreamBlocks);
         return code;
       }
-
-      size = 0;
     } else {
       taosArrayDestroy(pRes);
     }

@@ -277,14 +274,14 @@ static void waitForTaskIdle(SStreamTask* pTask, SStreamTask* pStreamTask) {

   int64_t st = taosGetTimestampMs();
   while (!streamTaskIsIdle(pStreamTask)) {
-    qDebug("s-task:%s level:%d wait for stream task:%s to be idle, check again in 100ms", id, pTask->info.taskLevel,
-           pStreamTask->id.idStr);
+    stDebug("s-task:%s level:%d wait for stream task:%s to be idle, check again in 100ms", id, pTask->info.taskLevel,
+            pStreamTask->id.idStr);
     taosMsleep(100);
   }

   double el = (taosGetTimestampMs() - st) / 1000.0;
   if (el > 0) {
-    qDebug("s-task:%s wait for stream task:%s for %.2fs to be idle", id, pStreamTask->id.idStr, el);
+    stDebug("s-task:%s wait for stream task:%s for %.2fs to be idle", id, pStreamTask->id.idStr, el);
   }
 }
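`waitForTaskIdle` above is a plain poll-and-sleep loop: check idleness every 100ms and report the total wait at the end. A runnable standalone equivalent, with a fake `isIdle()` standing in for `streamTaskIsIdle`:

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

// Stand-in for streamTaskIsIdle: becomes idle on the third check.
static int  checks = 0;
static bool isIdle(void) { return ++checks >= 3; }

static void msleep(long ms) {
  struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
  nanosleep(&ts, NULL);
}

int main(void) {
  long waited = 0;
  while (!isIdle()) {
    msleep(100);   // check again in 100ms, as the loop above does
    waited += 100;
  }
  if (waited > 0) printf("waited %.2fs for task to become idle\n", waited / 1000.0);
  return 0;
}
```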
@ -293,7 +290,7 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
|
||||||
|
|
||||||
SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
|
SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
|
||||||
if (pStreamTask == NULL) {
|
if (pStreamTask == NULL) {
|
||||||
qError(
|
stError(
|
||||||
"s-task:%s failed to find related stream task:0x%x, it may have been destroyed or closed, destroy the related "
|
"s-task:%s failed to find related stream task:0x%x, it may have been destroyed or closed, destroy the related "
|
||||||
"fill-history task",
|
"fill-history task",
|
||||||
pTask->id.idStr, (int32_t) pTask->streamTaskId.taskId);
|
pTask->id.idStr, (int32_t) pTask->streamTaskId.taskId);
|
||||||
|
@ -309,11 +306,11 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
|
||||||
taosWUnLockLatch(&pMeta->lock);
|
taosWUnLockLatch(&pMeta->lock);
|
||||||
return TSDB_CODE_STREAM_TASK_NOT_EXIST;
|
return TSDB_CODE_STREAM_TASK_NOT_EXIST;
|
||||||
} else {
|
} else {
|
||||||
qDebug("s-task:%s fill-history task end, update related stream task:%s info, transfer exec state", pTask->id.idStr,
|
stDebug("s-task:%s fill-history task end, update related stream task:%s info, transfer exec state", pTask->id.idStr,
|
||||||
pStreamTask->id.idStr);
|
pStreamTask->id.idStr);
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(pStreamTask->historyTaskId.taskId == pTask->id.taskId && pTask->status.appendTranstateBlock == true);
|
ASSERT(pStreamTask->hTaskInfo.id.taskId == pTask->id.taskId && pTask->status.appendTranstateBlock == true);
|
||||||
|
|
||||||
STimeWindow* pTimeWindow = &pStreamTask->dataRange.window;
|
STimeWindow* pTimeWindow = &pStreamTask->dataRange.window;
|
||||||
|
|
||||||
|
@ -326,7 +323,7 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
|
||||||
} else {
|
} else {
|
||||||
ASSERT(status == TASK_STATUS__NORMAL);
|
ASSERT(status == TASK_STATUS__NORMAL);
|
||||||
pStreamTask->status.taskStatus = TASK_STATUS__HALT;
|
pStreamTask->status.taskStatus = TASK_STATUS__HALT;
|
||||||
qDebug("s-task:%s halt by related fill-history task:%s", pStreamTask->id.idStr, pTask->id.idStr);
|
stDebug("s-task:%s halt by related fill-history task:%s", pStreamTask->id.idStr, pTask->id.idStr);
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait for the stream task to handle all in the inputQ, and to be idle
|
// wait for the stream task to handle all in the inputQ, and to be idle
|
||||||
|
@ -338,12 +335,12 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
|
||||||
// When a task is idle with halt status, all data in inputQ are consumed.
|
// When a task is idle with halt status, all data in inputQ are consumed.
|
||||||
if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||||
// update the scan data range for source task.
|
// update the scan data range for source task.
|
||||||
qDebug("s-task:%s level:%d stream task window %" PRId64 " - %" PRId64 " update to %" PRId64 " - %" PRId64
|
stDebug("s-task:%s level:%d stream task window %" PRId64 " - %" PRId64 " update to %" PRId64 " - %" PRId64
|
||||||
", status:%s, sched-status:%d",
|
", status:%s, sched-status:%d",
|
||||||
pStreamTask->id.idStr, TASK_LEVEL__SOURCE, pTimeWindow->skey, pTimeWindow->ekey, INT64_MIN,
|
pStreamTask->id.idStr, TASK_LEVEL__SOURCE, pTimeWindow->skey, pTimeWindow->ekey, INT64_MIN,
|
||||||
pTimeWindow->ekey, streamGetTaskStatusStr(TASK_STATUS__NORMAL), pStreamTask->status.schedStatus);
|
pTimeWindow->ekey, streamGetTaskStatusStr(TASK_STATUS__NORMAL), pStreamTask->status.schedStatus);
|
||||||
} else {
|
} else {
|
||||||
qDebug("s-task:%s no need to update time window for non-source task", pStreamTask->id.idStr);
|
stDebug("s-task:%s no need to update time window for non-source task", pStreamTask->id.idStr);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 1. expand the query time window for stream task of WAL scanner
|
// 1. expand the query time window for stream task of WAL scanner
|
||||||
|
@ -358,14 +355,14 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
|
||||||
// pause, since the pause allowed attribute is not set yet.
|
// pause, since the pause allowed attribute is not set yet.
|
||||||
streamTaskResumeFromHalt(pStreamTask);
|
streamTaskResumeFromHalt(pStreamTask);
|
||||||
|
|
||||||
qDebug("s-task:%s fill-history task set status to be dropping, save the state into disk", pTask->id.idStr);
|
stDebug("s-task:%s fill-history task set status to be dropping, save the state into disk", pTask->id.idStr);
|
||||||
|
|
||||||
// 4. free it and remove fill-history task from disk meta-store
|
// 4. free it and remove fill-history task from disk meta-store
|
||||||
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);
|
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);
|
||||||
|
|
||||||
// 5. clear the link between fill-history task and stream task info
|
// 5. clear the link between fill-history task and stream task info
|
||||||
pStreamTask->historyTaskId.taskId = 0;
|
pStreamTask->hTaskInfo.id.taskId = 0;
|
||||||
pStreamTask->historyTaskId.streamId = 0;
|
pStreamTask->hTaskInfo.id.streamId = 0;
|
||||||
|
|
||||||
// 6. save to disk
|
// 6. save to disk
|
||||||
taosWLockLatch(&pMeta->lock);
|
taosWLockLatch(&pMeta->lock);
|
||||||
|
@ -386,7 +383,7 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
|
||||||
pItem->type = STREAM_INPUT__REF_DATA_BLOCK;
|
pItem->type = STREAM_INPUT__REF_DATA_BLOCK;
|
||||||
pItem->pBlock = pDelBlock;
|
pItem->pBlock = pDelBlock;
|
||||||
int32_t code = streamTaskPutDataIntoInputQ(pStreamTask, (SStreamQueueItem*)pItem);
|
int32_t code = streamTaskPutDataIntoInputQ(pStreamTask, (SStreamQueueItem*)pItem);
|
||||||
qDebug("s-task:%s append dummy delete block,res:%d", pStreamTask->id.idStr, code);
|
stDebug("s-task:%s append dummy delete block,res:%d", pStreamTask->id.idStr, code);
|
||||||
}
|
}
|
||||||
|
|
||||||
streamSchedExec(pStreamTask);
|
streamSchedExec(pStreamTask);
|
||||||
|
@@ -425,7 +422,7 @@ static void doSetStreamInputBlock(SStreamTask* pTask, const void* pInput, int64_
     ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE);
     const SStreamDataSubmit* pSubmit = (const SStreamDataSubmit*)pInput;
     qSetMultiStreamInput(pExecutor, &pSubmit->submit, 1, STREAM_INPUT__DATA_SUBMIT);
-    qDebug("s-task:%s set submit blocks as source block completed, %p %p len:%d ver:%" PRId64, id, pSubmit,
+    stDebug("s-task:%s set submit blocks as source block completed, %p %p len:%d ver:%" PRId64, id, pSubmit,
            pSubmit->submit.msgStr, pSubmit->submit.msgLen, pSubmit->submit.ver);
     ASSERT((*pVer) <= pSubmit->submit.ver);
     (*pVer) = pSubmit->submit.ver;
@@ -435,7 +432,7 @@ static void doSetStreamInputBlock(SStreamTask* pTask, const void* pInput, int64_

     SArray* pBlockList = pBlock->blocks;
     int32_t numOfBlocks = taosArrayGetSize(pBlockList);
-    qDebug("s-task:%s set sdata blocks as input num:%d, ver:%" PRId64, id, numOfBlocks, pBlock->sourceVer);
+    stDebug("s-task:%s set sdata blocks as input num:%d, ver:%" PRId64, id, numOfBlocks, pBlock->sourceVer);
     qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__DATA_BLOCK);

   } else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) {
@@ -443,7 +440,7 @@ static void doSetStreamInputBlock(SStreamTask* pTask, const void* pInput, int64_

     SArray* pBlockList = pMerged->submits;
     int32_t numOfBlocks = taosArrayGetSize(pBlockList);
-    qDebug("s-task:%s %p set (merged) submit blocks as a batch, numOfBlocks:%d, ver:%" PRId64, id, pTask, numOfBlocks,
+    stDebug("s-task:%s %p set (merged) submit blocks as a batch, numOfBlocks:%d, ver:%" PRId64, id, pTask, numOfBlocks,
            pMerged->ver);
     qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__MERGED_SUBMIT);
     ASSERT((*pVer) <= pMerged->ver);
@@ -471,7 +468,7 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
     int32_t remain = streamAlignTransferState(pTask);
     if (remain > 0) {
       streamFreeQitem((SStreamQueueItem*)pBlock);
-      qDebug("s-task:%s receive upstream transfer state msg, remain:%d", id, remain);
+      stDebug("s-task:%s receive upstream transfer state msg, remain:%d", id, remain);
       return 0;
     }
   }
@@ -482,9 +479,9 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
   // transfer the ownership of executor state
   if (type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
     if (level == TASK_LEVEL__SOURCE) {
-      qDebug("s-task:%s add transfer-state block into outputQ", id);
+      stDebug("s-task:%s add transfer-state block into outputQ", id);
     } else {
-      qDebug("s-task:%s all upstream tasks send transfer-state block, add transfer-state block into outputQ", id);
+      stDebug("s-task:%s all upstream tasks send transfer-state block, add transfer-state block into outputQ", id);
       ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1);
     }

@@ -502,7 +499,7 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
     }
   } else {  // non-dispatch task, do task state transfer directly
     streamFreeQitem((SStreamQueueItem*)pBlock);
-    qDebug("s-task:%s non-dispatch task, start to transfer state directly", id);
+    stDebug("s-task:%s non-dispatch task, start to transfer state directly", id);
     ASSERT(pTask->info.fillHistory == 1);
     code = streamTransferStateToStreamTask(pTask);

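The change running through these hunks is mechanical: every q*-prefixed log call (qDebug, qError, qTrace, qInfo) becomes an st*-prefixed one, giving the stream module its own log namespace. A minimal sketch of how such a per-module wrapper can be defined follows; the macro, flag, and function names below are illustrative assumptions, not the actual TDengine definitions.

```c
// Illustrative per-module logging wrapper (assumed, not TDengine's real code):
// a dedicated verbosity flag plus a fixed "STM" prefix makes stream-module
// logs independently tunable and easy to grep.
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

static int32_t stDebugFlagSketch = 1;  // hypothetical module verbosity switch

static void stLogSketch(const char* level, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  fprintf(stderr, "STM %s ", level);  // module prefix + severity
  vfprintf(stderr, fmt, args);
  fputc('\n', stderr);
  va_end(args);
}

#define stDebugSketch(...) \
  do { if (stDebugFlagSketch) stLogSketch("DEBUG", __VA_ARGS__); } while (0)
#define stErrorSketch(...) stLogSketch("ERROR", __VA_ARGS__)
```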
@@ -522,17 +519,18 @@ int32_t streamExecForAll(SStreamTask* pTask) {
   const char* id = pTask->id.idStr;

   // merge multiple input data if possible in the input queue.
-  qDebug("s-task:%s start to extract data block from inputQ", id);
+  stDebug("s-task:%s start to extract data block from inputQ", id);

   while (1) {
+    int32_t           blockSize = 0;
     int32_t           numOfBlocks = 0;
     SStreamQueueItem* pInput = NULL;
     if (streamTaskShouldStop(&pTask->status)) {
-      qDebug("s-task:%s stream task is stopped", id);
+      stDebug("s-task:%s stream task is stopped", id);
       break;
     }

-    /*int32_t code = */ streamTaskGetDataFromInputQ(pTask, &pInput, &numOfBlocks);
+    /*int32_t code = */ streamTaskGetDataFromInputQ(pTask, &pInput, &numOfBlocks, &blockSize);
     if (pInput == NULL) {
       ASSERT(numOfBlocks == 0);
       return 0;
@@ -554,8 +552,10 @@ int32_t streamExecForAll(SStreamTask* pTask) {
     if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
       ASSERT(type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__CHECKPOINT);

+      // here only handle the data block sink operation
       if (type == STREAM_INPUT__DATA_BLOCK) {
-        qDebug("s-task:%s sink task start to sink %d blocks", id, numOfBlocks);
+        pTask->execInfo.sink.dataSize += blockSize;
+        stDebug("s-task:%s sink task start to sink %d blocks, size:%.2fKiB", id, numOfBlocks, SIZE_IN_KiB(blockSize));
         doOutputResultBlockImpl(pTask, (SStreamDataBlock*)pInput);
         continue;
       }
@@ -564,7 +564,7 @@ int32_t streamExecForAll(SStreamTask* pTask) {
     int64_t st = taosGetTimestampMs();

     const SStreamQueueItem* pItem = pInput;
-    qDebug("s-task:%s start to process batch of blocks, num:%d, type:%d", id, numOfBlocks, pItem->type);
+    stDebug("s-task:%s start to process batch of blocks, num:%d, type:%d", id, numOfBlocks, pItem->type);

     int64_t ver = pTask->chkInfo.checkpointVer;
     doSetStreamInputBlock(pTask, pInput, &ver, id);
@@ -574,14 +574,14 @@ int32_t streamExecForAll(SStreamTask* pTask) {
     streamTaskExecImpl(pTask, pInput, &resSize, &totalBlocks);

     double el = (taosGetTimestampMs() - st) / 1000.0;
-    qDebug("s-task:%s batch of input blocks exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%d", id, el,
-           SIZE_IN_MB(resSize), totalBlocks);
+    stDebug("s-task:%s batch of input blocks exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%d", id, el,
+            SIZE_IN_MiB(resSize), totalBlocks);

     // update the currentVer if processing the submit blocks.
     ASSERT(pTask->chkInfo.checkpointVer <= pTask->chkInfo.nextProcessVer && ver >= pTask->chkInfo.checkpointVer);

     if (ver != pTask->chkInfo.checkpointVer) {
-      qDebug("s-task:%s update checkpointVer(unsaved) from %" PRId64 " to %" PRId64 " , currentVer:%" PRId64,
+      stDebug("s-task:%s update checkpointVer(unsaved) from %" PRId64 " to %" PRId64 ", nextProcessVer:%" PRId64,
              pTask->id.idStr, pTask->chkInfo.checkpointVer, ver, pTask->chkInfo.nextProcessVer);
       pTask->chkInfo.checkpointVer = ver;
     }
@@ -591,7 +591,7 @@ int32_t streamExecForAll(SStreamTask* pTask) {
     // todo other thread may change the status
     // do nothing after sync executor state to storage backend, untill the vnode-level checkpoint is completed.
     if (type == STREAM_INPUT__CHECKPOINT) {
-      qDebug("s-task:%s checkpoint block received, set the status:%s", pTask->id.idStr,
+      stDebug("s-task:%s checkpoint block received, set status:%s", pTask->id.idStr,
              streamGetTaskStatusStr(pTask->status.taskStatus));
       streamTaskBuildCheckpoint(pTask);
       return 0;
@@ -627,14 +627,14 @@ int32_t streamTryExec(SStreamTask* pTask) {
         atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
         taosThreadMutexUnlock(&pTask->lock);

-        qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id,
+        stDebug("s-task:%s exec completed, status:%s, sched-status:%d", id,
                streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus);
         return 0;
       }
       taosThreadMutexUnlock(&pTask->lock);
     }
   } else {
-    qDebug("s-task:%s already started to exec by other thread, status:%s, sched-status:%d", id,
+    stDebug("s-task:%s already started to exec by other thread, status:%s, sched-status:%d", id,
            streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus);
   }

@@ -642,7 +642,7 @@ int32_t streamTryExec(SStreamTask* pTask) {
 }

 int32_t streamTaskReleaseState(SStreamTask* pTask) {
-  qDebug("s-task:%s release exec state", pTask->id.idStr);
+  stDebug("s-task:%s release exec state", pTask->id.idStr);
   void* pExecutor = pTask->exec.pExecutor;
   if (pExecutor != NULL) {
     int32_t code = qStreamOperatorReleaseState(pExecutor);
@@ -653,7 +653,7 @@ int32_t streamTaskReleaseState(SStreamTask* pTask) {
 }

 int32_t streamTaskReloadState(SStreamTask* pTask) {
-  qDebug("s-task:%s reload exec state", pTask->id.idStr);
+  stDebug("s-task:%s reload exec state", pTask->id.idStr);
   void* pExecutor = pTask->exec.pExecutor;
   if (pExecutor != NULL) {
     int32_t code = qStreamOperatorReloadState(pExecutor);
@@ -667,7 +667,7 @@ int32_t streamAlignTransferState(SStreamTask* pTask) {
   int32_t numOfUpstream = taosArrayGetSize(pTask->pUpstreamInfoList);
   int32_t old = atomic_val_compare_exchange_32(&pTask->transferStateAlignCnt, 0, numOfUpstream);
   if (old == 0) {
-    qDebug("s-task:%s set the transfer state aligncnt %d", pTask->id.idStr, numOfUpstream);
+    stDebug("s-task:%s set the transfer state aligncnt %d", pTask->id.idStr, numOfUpstream);
   }

   return atomic_sub_fetch_32(&pTask->transferStateAlignCnt, 1);
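These exec-path hunks also rename SIZE_IN_MB to SIZE_IN_MiB and add SIZE_IN_KiB, making the binary (1024-based) units in the log lines explicit. A plausible definition of such conversion macros, assumed rather than copied from the TDengine headers:

```c
// Hypothetical byte-to-binary-unit helpers; the real TDengine macros may differ.
#define SIZE_IN_KiB(_v) ((_v) / 1024.0)
#define SIZE_IN_MiB(_v) ((_v) / (1024.0 * 1024.0))
```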
@@ -21,10 +21,6 @@
 #include "tstream.h"
 #include "ttimer.h"

-#define META_HB_CHECK_INTERVAL    200
-#define META_HB_SEND_IDLE_COUNTER 25  // send hb every 5 sec
-#define STREAM_TASK_KEY_LEN       ((sizeof(int64_t)) << 1)
-
 static TdThreadOnce streamMetaModuleInit = PTHREAD_ONCE_INIT;

 int32_t streamBackendId = 0;
@@ -115,7 +111,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
   SStreamMeta* pMeta = taosMemoryCalloc(1, sizeof(SStreamMeta));
   if (pMeta == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
-    qError("vgId:%d failed to prepare stream meta, alloc size:%" PRIzu ", out of memory", vgId, sizeof(SStreamMeta));
+    stError("vgId:%d failed to prepare stream meta, alloc size:%" PRIzu ", out of memory", vgId, sizeof(SStreamMeta));
     return NULL;
   }

@@ -151,6 +147,10 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
     goto _err;
   }

+  pMeta->startInfo.pReadyTaskSet = taosHashInit(64, fp, false, HASH_NO_LOCK);
+  if (pMeta->startInfo.pReadyTaskSet == NULL) {
+  }
+
   pMeta->pHbInfo = taosMemoryCalloc(1, sizeof(SMetaHbInfo));
   if (pMeta->pHbInfo == NULL) {
     goto _err;
@@ -190,15 +190,15 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
   pMeta->chkpId = streamGetLatestCheckpointId(pMeta);
   pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId);
   while (pMeta->streamBackend == NULL) {
-    taosMsleep(2 * 1000);
+    taosMsleep(100);
     pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId);
     if (pMeta->streamBackend == NULL) {
-      qError("vgId:%d failed to init stream backend", pMeta->vgId);
-      qInfo("vgId:%d retry to init stream backend", pMeta->vgId);
+      stInfo("vgId:%d failed to init stream backend, retry in 100ms", pMeta->vgId);
     }
   }
   pMeta->streamBackendRid = taosAddRef(streamBackendId, pMeta->streamBackend);

+  pMeta->role = NODE_ROLE_UNINIT;
   code = streamBackendLoadCheckpointInfo(pMeta);

   taosInitRWLatch(&pMeta->lock);
@@ -206,7 +206,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF

   pMeta->numOfPausedTasks = 0;
   pMeta->numOfStreamTasks = 0;
-  qInfo("vgId:%d open stream meta successfully, latest checkpoint:%" PRId64 ", stage:%" PRId64, vgId, pMeta->chkpId,
+  stInfo("vgId:%d open stream meta successfully, latest checkpoint:%" PRId64 ", stage:%" PRId64, vgId, pMeta->chkpId,
         stage);
   return pMeta;

@@ -219,10 +219,10 @@ _err:
   if (pMeta->db) tdbClose(pMeta->db);
   if (pMeta->pHbInfo) taosMemoryFreeClear(pMeta->pHbInfo);
   if (pMeta->pUpdateTaskSet) taosHashCleanup(pMeta->pUpdateTaskSet);
+  if (pMeta->startInfo.pReadyTaskSet) taosHashCleanup(pMeta->startInfo.pReadyTaskSet);
   taosMemoryFree(pMeta);

-  qError("failed to open stream meta");
+  stError("failed to open stream meta");
   return NULL;
 }

@@ -231,6 +231,7 @@ int32_t streamMetaReopen(SStreamMeta* pMeta) {

   pMeta->streamBackendRid = -1;
   pMeta->streamBackend = NULL;
+  pMeta->role = NODE_ROLE_UNINIT;

   char* defaultPath = taosMemoryCalloc(1, strlen(pMeta->path) + 128);
   sprintf(defaultPath, "%s%s%s", pMeta->path, TD_DIRSEP, "state");
@@ -245,7 +246,7 @@ int32_t streamMetaReopen(SStreamMeta* pMeta) {
     code = taosRenameFile(newPath, defaultPath);
     if (code != 0) {
       terrno = TAOS_SYSTEM_ERROR(code);
-      qError("vgId:%d failed to rename file, from %s to %s, code:%s", pMeta->vgId, newPath, defaultPath,
+      stError("vgId:%d failed to rename file, from %s to %s, code:%s", pMeta->vgId, newPath, defaultPath,
              tstrerror(terrno));

       taosMemoryFree(defaultPath);
@@ -256,17 +257,18 @@ int32_t streamMetaReopen(SStreamMeta* pMeta) {

   pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId);
   while (pMeta->streamBackend == NULL) {
-    taosMsleep(2 * 1000);
+    taosMsleep(100);
     pMeta->streamBackend = streamBackendInit(pMeta->path, pMeta->chkpId);
     if (pMeta->streamBackend == NULL) {
-      qError("vgId:%d failed to init stream backend", pMeta->vgId);
-      qInfo("vgId:%d retry to init stream backend", pMeta->vgId);
-      // return -1;
+      stInfo("vgId:%d failed to init stream backend, retry in 100ms", pMeta->vgId);
     }
   }

   pMeta->streamBackendRid = taosAddRef(streamBackendId, pMeta->streamBackend);
   streamBackendLoadCheckpointInfo(pMeta);

+  taosMemoryFree(defaultPath);
+  taosMemoryFree(newPath);
   return 0;
 }

@@ -277,7 +279,7 @@ void streamMetaClear(SStreamMeta* pMeta) {

     // release the ref by timer
     if (p->info.triggerParam != 0 && p->info.fillHistory == 0) {  // one more ref in timer
-      qDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", p->id.idStr, p->refCnt);
+      stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", p->id.idStr, p->refCnt);
       taosTmrStop(p->schedInfo.pTimer);
       p->info.triggerParam = 0;
       streamMetaReleaseTask(pMeta, p);
@@ -299,7 +301,7 @@ void streamMetaClear(SStreamMeta* pMeta) {
 }

 void streamMetaClose(SStreamMeta* pMeta) {
-  qDebug("start to close stream meta");
+  stDebug("start to close stream meta");
   if (pMeta == NULL) {
     return;
   }
@@ -315,7 +317,7 @@ void streamMetaClose(SStreamMeta* pMeta) {

 void streamMetaCloseImpl(void* arg) {
   SStreamMeta* pMeta = arg;
-  qDebug("start to do-close stream meta");
+  stDebug("start to do-close stream meta");
   if (pMeta == NULL) {
     return;
   }
@@ -334,13 +336,15 @@ void streamMetaCloseImpl(void* arg) {
   taosHashCleanup(pMeta->pTasksMap);
   taosHashCleanup(pMeta->pTaskBackendUnique);
   taosHashCleanup(pMeta->pUpdateTaskSet);
+  taosHashCleanup(pMeta->startInfo.pReadyTaskSet);

   taosMemoryFree(pMeta->pHbInfo);
   taosMemoryFree(pMeta->path);
   taosThreadMutexDestroy(&pMeta->backendMutex);

+  pMeta->role = NODE_ROLE_UNINIT;
   taosMemoryFree(pMeta);
-  qDebug("end to close stream meta");
+  stDebug("end to close stream meta");
 }

 int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) {
@@ -364,7 +368,7 @@ int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) {

   int64_t id[2] = {pTask->id.streamId, pTask->id.taskId};
   if (tdbTbUpsert(pMeta->pTaskDb, id, STREAM_TASK_KEY_LEN, buf, len, pMeta->txn) < 0) {
-    qError("s-task:%s save to disk failed, code:%s", pTask->id.idStr, tstrerror(terrno));
+    stError("s-task:%s save to disk failed, code:%s", pTask->id.idStr, tstrerror(terrno));
     return -1;
   }

@@ -376,10 +380,10 @@ int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pTaskId) {
   int64_t key[2] = {pTaskId->streamId, pTaskId->taskId};
   int32_t code = tdbTbDelete(pMeta->pTaskDb, key, STREAM_TASK_KEY_LEN, pMeta->txn);
   if (code != 0) {
-    qError("vgId:%d failed to remove task:0x%x from metastore, code:%s", pMeta->vgId, (int32_t) pTaskId->taskId,
+    stError("vgId:%d failed to remove task:0x%x from metastore, code:%s", pMeta->vgId, (int32_t)pTaskId->taskId,
            tstrerror(terrno));
   } else {
-    qDebug("vgId:%d remove task:0x%x from metastore", pMeta->vgId, (int32_t) pTaskId->taskId);
+    stDebug("vgId:%d remove task:0x%x from metastore", pMeta->vgId, (int32_t)pTaskId->taskId);
   }

   return code;
@@ -454,7 +458,7 @@ SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t
     if (!streamTaskShouldStop(&(*ppTask)->status)) {
       int32_t ref = atomic_add_fetch_32(&(*ppTask)->refCnt, 1);
       taosRUnLockLatch(&pMeta->lock);
-      qTrace("s-task:%s acquire task, ref:%d", (*ppTask)->id.idStr, ref);
+      stTrace("s-task:%s acquire task, ref:%d", (*ppTask)->id.idStr, ref);
       return *ppTask;
     }
   }
@@ -466,13 +470,13 @@ SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t
 void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) {
   int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1);
   if (ref > 0) {
-    qTrace("s-task:%s release task, ref:%d", pTask->id.idStr, ref);
+    stTrace("s-task:%s release task, ref:%d", pTask->id.idStr, ref);
   } else if (ref == 0) {
     ASSERT(streamTaskShouldStop(&pTask->status));
-    qTrace("s-task:%s all refs are gone, free it", pTask->id.idStr);
+    stTrace("s-task:%s all refs are gone, free it", pTask->id.idStr);
     tFreeStreamTask(pTask);
   } else if (ref < 0) {
-    qError("task ref is invalid, ref:%d, %s", ref, pTask->id.idStr);
+    stError("task ref is invalid, ref:%d, %s", ref, pTask->id.idStr);
   }
 }

@@ -498,17 +502,17 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
     pTask = *ppTask;
     if (streamTaskShouldPause(&pTask->status)) {
       int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1);
-      qInfo("vgId:%d s-task:%s drop stream task. pause task num:%d", pMeta->vgId, pTask->id.idStr, num);
+      stInfo("vgId:%d s-task:%s drop stream task. pause task num:%d", pMeta->vgId, pTask->id.idStr, num);
     }
     atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__DROPPING);
   } else {
-    qDebug("vgId:%d failed to find the task:0x%x, it may be dropped already", pMeta->vgId, taskId);
+    stDebug("vgId:%d failed to find the task:0x%x, it may be dropped already", pMeta->vgId, taskId);
     taosWUnLockLatch(&pMeta->lock);
     return 0;
   }
   taosWUnLockLatch(&pMeta->lock);

-  qDebug("s-task:0x%x set task status:%s and start to unregister it", taskId,
+  stDebug("s-task:0x%x set task status:%s and start to unregister it", taskId,
          streamGetTaskStatusStr(TASK_STATUS__DROPPING));

   while (1) {
@@ -522,7 +526,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
       }

       taosMsleep(10);
-      qDebug("s-task:%s wait for quit from timer", (*ppTask)->id.idStr);
+      stDebug("s-task:%s wait for quit from timer", (*ppTask)->id.idStr);
       taosRUnLockLatch(&pMeta->lock);
     } else {
       taosRUnLockLatch(&pMeta->lock);
@@ -539,9 +543,11 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
       STaskId streamTaskId = {.streamId = (*ppTask)->streamTaskId.streamId, .taskId = (*ppTask)->streamTaskId.taskId};
       SStreamTask** ppStreamTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &streamTaskId, sizeof(streamTaskId));
       if (ppStreamTask != NULL) {
-        (*ppStreamTask)->historyTaskId.taskId = 0;
-        (*ppStreamTask)->historyTaskId.streamId = 0;
+        (*ppStreamTask)->hTaskInfo.id.taskId = 0;
+        (*ppStreamTask)->hTaskInfo.id.streamId = 0;
       }
+    } else {
+      atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1);
     }

     taosHashRemove(pMeta->pTasksMap, &id, sizeof(id));
@@ -551,7 +557,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
     doRemoveIdFromList(pMeta, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id);

     if (pTask->info.triggerParam != 0 && pTask->info.fillHistory == 0) {
-      qDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt);
+      stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt);
       taosTmrStop(pTask->schedInfo.pTimer);
       pTask->info.triggerParam = 0;
       streamMetaReleaseTask(pMeta, pTask);
@@ -560,7 +566,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
     streamMetaRemoveTask(pMeta, &id);
     streamMetaReleaseTask(pMeta, pTask);
   } else {
-    qDebug("vgId:%d failed to find the task:0x%x, it may have been dropped already", pMeta->vgId, taskId);
+    stDebug("vgId:%d failed to find the task:0x%x, it may have been dropped already", pMeta->vgId, taskId);
   }

   taosWUnLockLatch(&pMeta->lock);
@@ -581,18 +587,18 @@ int32_t streamMetaBegin(SStreamMeta* pMeta) {
 // todo add error log
 int32_t streamMetaCommit(SStreamMeta* pMeta) {
   if (tdbCommit(pMeta->db, pMeta->txn) < 0) {
-    qError("vgId:%d failed to commit stream meta", pMeta->vgId);
+    stError("vgId:%d failed to commit stream meta", pMeta->vgId);
     return -1;
   }

   if (tdbPostCommit(pMeta->db, pMeta->txn) < 0) {
-    qError("vgId:%d failed to do post-commit stream meta", pMeta->vgId);
+    stError("vgId:%d failed to do post-commit stream meta", pMeta->vgId);
     return -1;
   }

   if (tdbBegin(pMeta->db, &pMeta->txn, tdbDefaultMalloc, tdbDefaultFree, NULL,
                TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
-    qError("vgId:%d failed to begin trans", pMeta->vgId);
+    stError("vgId:%d failed to begin trans", pMeta->vgId);
     return -1;
   }

@@ -628,7 +634,7 @@ int64_t streamGetLatestCheckpointId(SStreamMeta* pMeta) {
     chkpId = TMAX(chkpId, info.checkpointId);
   }

-  qDebug("get max chkp id: %" PRId64 "", chkpId);
+  stDebug("get max chkp id: %" PRId64 "", chkpId);

   tdbFree(pKey);
   tdbFree(pVal);
@@ -648,10 +654,10 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) {
   TBC*    pCur = NULL;
   int32_t vgId = pMeta->vgId;

-  qInfo("vgId:%d load stream tasks from meta files", vgId);
+  stInfo("vgId:%d load stream tasks from meta files", vgId);

   if (tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL) < 0) {
-    qError("vgId:%d failed to open stream meta, code:%s", vgId, tstrerror(terrno));
+    stError("vgId:%d failed to open stream meta, code:%s", vgId, tstrerror(terrno));
     return -1;
   }

@@ -667,7 +673,7 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) {
     SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
     if (pTask == NULL) {
       terrno = TSDB_CODE_OUT_OF_MEMORY;
-      qError("vgId:%d failed to load stream task from meta-files, code:%s", vgId, tstrerror(terrno));
+      stError("vgId:%d failed to load stream task from meta-files, code:%s", vgId, tstrerror(terrno));
       doClear(pKey, pVal, pCur, pRecycleList);
       return -1;
     }
@@ -677,9 +683,11 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) {
       tDecoderClear(&decoder);
       doClear(pKey, pVal, pCur, pRecycleList);
       tFreeStreamTask(pTask);
-      qError(
-          "vgId:%d stream read incompatible data, rm %s/vnode/vnode*/tq/stream if taosd cannot start, and rebuild stream "
-          "manually", vgId, tsDataDir);
+      stError(
+          "vgId:%d stream read incompatible data, rm %s/vnode/vnode*/tq/stream if taosd cannot start, and rebuild "
+          "stream "
+          "manually",
+          vgId, tsDataDir);
       return -1;
     }
     tDecoderClear(&decoder);
@@ -688,11 +696,11 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) {
       int32_t taskId = pTask->id.taskId;
       tFreeStreamTask(pTask);

-      STaskId id = extractStreamTaskKey(pTask);
+      STaskId id = streamTaskExtractKey(pTask);

       taosArrayPush(pRecycleList, &id);
       int32_t total = taosArrayGetSize(pRecycleList);
-      qDebug("s-task:0x%x is already dropped, add into recycle list, total:%d", taskId, total);
+      stDebug("s-task:0x%x is already dropped, add into recycle list, total:%d", taskId, total);
       continue;
     }

@@ -736,7 +744,7 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) {
   tdbFree(pKey);
   tdbFree(pVal);
   if (tdbTbcClose(pCur) < 0) {
-    qError("vgId:%d failed to close meta-file cursor", vgId);
+    stError("vgId:%d failed to close meta-file cursor", vgId);
     taosArrayDestroy(pRecycleList);
     return -1;
   }
@@ -750,7 +758,7 @@ int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta) {

   int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
   ASSERT(pMeta->numOfStreamTasks <= numOfTasks && pMeta->numOfPausedTasks <= numOfTasks);
-  qDebug("vgId:%d load %d tasks into meta from disk completed, streamTask:%d, paused:%d", pMeta->vgId, numOfTasks,
+  stDebug("vgId:%d load %d tasks into meta from disk completed, streamTask:%d, paused:%d", pMeta->vgId, numOfTasks,
          pMeta->numOfStreamTasks, pMeta->numOfPausedTasks);
   taosArrayDestroy(pRecycleList);
   return 0;
@@ -766,6 +774,8 @@ int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pReq) {
     if (tEncodeI64(pEncoder, ps->id.streamId) < 0) return -1;
     if (tEncodeI32(pEncoder, ps->id.taskId) < 0) return -1;
     if (tEncodeI32(pEncoder, ps->status) < 0) return -1;
+    if (tEncodeI32(pEncoder, ps->stage) < 0) return -1;
+    if (tEncodeI32(pEncoder, ps->nodeId) < 0) return -1;
   }
   tEndEncode(pEncoder);
   return pEncoder->pos;
@@ -778,22 +788,24 @@ int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pReq) {

   pReq->pTaskStatus = taosArrayInit(pReq->numOfTasks, sizeof(STaskStatusEntry));
   for (int32_t i = 0; i < pReq->numOfTasks; ++i) {
-    STaskStatusEntry hb = {0};
-    if (tDecodeI64(pDecoder, &hb.id.streamId) < 0) return -1;
     int32_t taskId = 0;
+    STaskStatusEntry entry = {0};
+
+    if (tDecodeI64(pDecoder, &entry.id.streamId) < 0) return -1;
     if (tDecodeI32(pDecoder, &taskId) < 0) return -1;
+    if (tDecodeI32(pDecoder, &entry.status) < 0) return -1;
+    if (tDecodeI32(pDecoder, &entry.stage) < 0) return -1;
+    if (tDecodeI32(pDecoder, &entry.nodeId) < 0) return -1;

-    hb.id.taskId = taskId;
-    if (tDecodeI32(pDecoder, &hb.status) < 0) return -1;
-
-    taosArrayPush(pReq->pTaskStatus, &hb);
+    entry.id.taskId = taskId;
+    taosArrayPush(pReq->pTaskStatus, &entry);
   }

   tEndDecode(pDecoder);
   return 0;
 }

-static bool enoughTimeDuration(SMetaHbInfo* pInfo) {
+static bool waitForEnoughDuration(SMetaHbInfo* pInfo) {
   if ((++pInfo->tickCounter) >= META_HB_SEND_IDLE_COUNTER) {  // reset the counter
     pInfo->tickCounter = 0;
     return true;
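The heartbeat entry grows two fields (stage and nodeId), and the hunks above keep the encoder and decoder in lock-step: the decoder now reads streamId, taskId, status, stage, nodeId in exactly the order the encoder writes them. A self-contained sketch of this fixed-order wire symmetry; the types and helpers below are illustrative, not TDengine's tEncodeI32/tDecodeI32 API:

```c
#include <stdint.h>
#include <string.h>

// Illustrative fixed-order serializer: decode must read fields in exactly the
// order encode wrote them, or every later field is mis-aligned.
typedef struct { int64_t streamId; int32_t taskId, status, stage, nodeId; } Entry;

static size_t encodeEntry(uint8_t* buf, const Entry* e) {
  size_t n = 0;
  memcpy(buf + n, &e->streamId, sizeof(e->streamId)); n += sizeof(e->streamId);
  memcpy(buf + n, &e->taskId,   sizeof(e->taskId));   n += sizeof(e->taskId);
  memcpy(buf + n, &e->status,   sizeof(e->status));   n += sizeof(e->status);
  memcpy(buf + n, &e->stage,    sizeof(e->stage));    n += sizeof(e->stage);
  memcpy(buf + n, &e->nodeId,   sizeof(e->nodeId));   n += sizeof(e->nodeId);
  return n;
}

static size_t decodeEntry(const uint8_t* buf, Entry* e) {
  size_t n = 0;  // same field order as encodeEntry
  memcpy(&e->streamId, buf + n, sizeof(e->streamId)); n += sizeof(e->streamId);
  memcpy(&e->taskId,   buf + n, sizeof(e->taskId));   n += sizeof(e->taskId);
  memcpy(&e->status,   buf + n, sizeof(e->status));   n += sizeof(e->status);
  memcpy(&e->stage,    buf + n, sizeof(e->stage));    n += sizeof(e->stage);
  memcpy(&e->nodeId,   buf + n, sizeof(e->nodeId));   n += sizeof(e->nodeId);
  return n;
}
```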
@@ -812,14 +824,14 @@ void metaHbToMnode(void* param, void* tmrId) {
   // need to stop, stop now
   if (pMeta->pHbInfo->stopFlag == STREAM_META_WILL_STOP) {
     pMeta->pHbInfo->stopFlag = STREAM_META_OK_TO_STOP;
-    qDebug("vgId:%d jump out of meta timer", pMeta->vgId);
+    stDebug("vgId:%d jump out of meta timer", pMeta->vgId);
     taosReleaseRef(streamMetaId, rid);
     return;
   }

   // not leader not send msg
-  if (!pMeta->leader) {
-    qInfo("vgId:%d follower not send hb to mnode", pMeta->vgId);
+  if (pMeta->role == NODE_ROLE_FOLLOWER) {
+    stInfo("vgId:%d follower not send hb to mnode", pMeta->vgId);
     taosReleaseRef(streamMetaId, rid);
     pMeta->pHbInfo->hbStart = 0;
     return;
@@ -830,13 +842,13 @@ void metaHbToMnode(void* param, void* tmrId) {
     pMeta->pHbInfo->hbStart = taosGetTimestampMs();
   }

-  if (!enoughTimeDuration(pMeta->pHbInfo)) {
+  if (!waitForEnoughDuration(pMeta->pHbInfo)) {
     taosTmrReset(metaHbToMnode, META_HB_CHECK_INTERVAL, param, streamEnv.timer, &pMeta->pHbInfo->hbTmr);
     taosReleaseRef(streamMetaId, rid);
     return;
   }

-  qDebug("vgId:%d build stream task hb, leader:%d", pMeta->vgId, pMeta->leader);
+  stDebug("vgId:%d build stream task hb, leader:%d", pMeta->vgId, (pMeta->role == NODE_ROLE_LEADER));

   SStreamHbMsg hbMsg = {0};
   taosRLockLatch(&pMeta->lock);
@@ -855,7 +867,8 @@ void metaHbToMnode(void* param, void* tmrId) {
       continue;
     }

-    STaskStatusEntry entry = {.id = *pId, .status = (*pTask)->status.taskStatus};
+    STaskStatusEntry entry = {
+        .id = *pId, .status = (*pTask)->status.taskStatus, .nodeId = pMeta->vgId, .stage = pMeta->stage};
     taosArrayPush(hbMsg.pTaskStatus, &entry);

     if (!hasValEpset) {
@@ -873,7 +886,7 @@ void metaHbToMnode(void* param, void* tmrId) {

   tEncodeSize(tEncodeStreamHbMsg, &hbMsg, tlen, code);
   if (code < 0) {
-    qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code));
+    stError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code));
     taosArrayDestroy(hbMsg.pTaskStatus);
     taosReleaseRef(streamMetaId, rid);
     return;
@@ -881,7 +894,7 @@ void metaHbToMnode(void* param, void* tmrId) {

   void* buf = rpcMallocCont(tlen);
   if (buf == NULL) {
-    qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
+    stError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
     taosArrayDestroy(hbMsg.pTaskStatus);
     taosReleaseRef(streamMetaId, rid);
     return;
@@ -891,7 +904,7 @@ void metaHbToMnode(void* param, void* tmrId) {
   tEncoderInit(&encoder, buf, tlen);
   if ((code = tEncodeStreamHbMsg(&encoder, &hbMsg)) < 0) {
     rpcFreeCont(buf);
-    qError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code));
+    stError("vgId:%d encode stream hb msg failed, code:%s", pMeta->vgId, tstrerror(code));
     taosArrayDestroy(hbMsg.pTaskStatus);
     taosReleaseRef(streamMetaId, rid);
     return;
@@ -904,11 +917,11 @@ void metaHbToMnode(void* param, void* tmrId) {

     pMeta->pHbInfo->hbCount += 1;

-    qDebug("vgId:%d, build and send hb to mnode, numOfTasks:%d total:%d", pMeta->vgId, hbMsg.numOfTasks,
+    stDebug("vgId:%d, build and send hb to mnode, numOfTasks:%d total:%d", pMeta->vgId, hbMsg.numOfTasks,
            pMeta->pHbInfo->hbCount);
     tmsgSendReq(&epset, &msg);
   } else {
-    qDebug("vgId:%d no tasks and no mnd epset, not send stream hb to mnode", pMeta->vgId);
+    stDebug("vgId:%d no tasks and no mnd epset, not send stream hb to mnode", pMeta->vgId);
   }

   taosArrayDestroy(hbMsg.pTaskStatus);
@@ -941,8 +954,8 @@ static bool hasStreamTaskInTimer(SStreamMeta* pMeta) {
 void streamMetaNotifyClose(SStreamMeta* pMeta) {
   int32_t vgId = pMeta->vgId;

-  qDebug("vgId:%d notify all stream tasks that the vnode is closing. isLeader:%d startHb%" PRId64 ", totalHb:%d", vgId,
-         pMeta->leader, pMeta->pHbInfo->hbStart, pMeta->pHbInfo->hbCount);
+  stDebug("vgId:%d notify all stream tasks that the vnode is closing. isLeader:%d startHb%" PRId64 ", totalHb:%d", vgId,
+          (pMeta->role == NODE_ROLE_LEADER), pMeta->pHbInfo->hbStart, pMeta->pHbInfo->hbCount);

   taosWLockLatch(&pMeta->lock);

@@ -954,31 +967,31 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) {
     }

     SStreamTask* pTask = *(SStreamTask**)pIter;
-    qDebug("vgId:%d s-task:%s set closing flag", vgId, pTask->id.idStr);
+    stDebug("vgId:%d s-task:%s set closing flag", vgId, pTask->id.idStr);
     streamTaskStop(pTask);
   }

   taosWUnLockLatch(&pMeta->lock);

   // wait for the stream meta hb function stopping
-  if (pMeta->leader) {
+  if (pMeta->role == NODE_ROLE_LEADER) {
     pMeta->pHbInfo->stopFlag = STREAM_META_WILL_STOP;
     while (pMeta->pHbInfo->stopFlag != STREAM_META_OK_TO_STOP) {
       taosMsleep(100);
-      qDebug("vgId:%d wait for meta to stop timer", pMeta->vgId);
+      stDebug("vgId:%d wait for meta to stop timer", pMeta->vgId);
     }
   }

-  qDebug("vgId:%d start to check all tasks", vgId);
+  stDebug("vgId:%d start to check all tasks", vgId);
   int64_t st = taosGetTimestampMs();

   while (hasStreamTaskInTimer(pMeta)) {
-    qDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
+    stDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
     taosMsleep(100);
   }

   int64_t el = taosGetTimestampMs() - st;
-  qDebug("vgId:%d all stream tasks are not in timer, continue close, elapsed time:%" PRId64 " ms", pMeta->vgId, el);
+  stDebug("vgId:%d all stream tasks are not in timer, continue close, elapsed time:%" PRId64 " ms", pMeta->vgId, el);
 }

 void streamMetaStartHb(SStreamMeta* pMeta) {
@@ -987,3 +1000,8 @@ void streamMetaStartHb(SStreamMeta* pMeta) {
   *pRid = pMeta->rid;
   metaHbToMnode(pRid, NULL);
 }
+
+void streamMetaInitForSnode(SStreamMeta* pMeta) {
+  pMeta->stage = 0;
+  pMeta->role = NODE_ROLE_LEADER;
+}
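Throughout the meta hunks the boolean pMeta->leader is replaced by a pMeta->role field compared against NODE_ROLE_* constants, which leaves room for an explicit uninitialized state between open/reopen and the first role assignment. A sketch of what such an enum might look like; only the three constants visible in this diff are listed, and the values are assumptions:

```c
// Hypothetical role enum; the actual TDengine values and members may differ.
typedef enum {
  NODE_ROLE_UNINIT = 0,  // role not decided yet (set in streamMetaOpen/Reopen)
  NODE_ROLE_LEADER,      // sends hb to mnode, runs tasks
  NODE_ROLE_FOLLOWER,    // skips hb, as metaHbToMnode shows above
} ENodeRoleSketch;
```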
@@ -20,6 +20,7 @@
 #define STREAM_TASK_QUEUE_CAPACITY                20480
 #define STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE  (30)
 #define STREAM_TASK_OUTPUT_QUEUE_CAPACITY_IN_SIZE (50)
+#define MAX_SMOOTH_BURST_RATIO                    5  // 20 sec

 // todo refactor:
 // read data from input queue
@@ -30,7 +31,9 @@ typedef struct SQueueReader {
   int32_t waitDuration;  // maximum wait time to format several block into a batch to process, unit: ms
 } SQueueReader;

-static bool streamTaskHasAvailableToken(STokenBucket* pBucket);
+static bool streamTaskExtractAvailableToken(STokenBucket* pBucket);
+static void streamTaskPutbackToken(STokenBucket* pBucket);
+static void streamTaskConsumeQuota(STokenBucket* pBucket, int32_t bytes);

 static void streamQueueCleanup(SStreamQueue* pQueue) {
   void* qItem = NULL;
@@ -65,7 +68,7 @@ SStreamQueue* streamQueueOpen(int64_t cap) {
 }

 void streamQueueClose(SStreamQueue* pQueue, int32_t taskId) {
-  qDebug("s-task:0x%x free the queue:%p, items in queue:%d", taskId, pQueue->pQueue, taosQueueItemSize(pQueue->pQueue));
+  stDebug("s-task:0x%x free the queue:%p, items in queue:%d", taskId, pQueue->pQueue, taosQueueItemSize(pQueue->pQueue));
   streamQueueCleanup(pQueue);

   taosFreeQall(pQueue->qall);
@@ -102,64 +105,6 @@ void streamQueueProcessFail(SStreamQueue* queue) {
   atomic_store_8(&queue->status, STREAM_QUEUE__FAILED);
 }

-#if 0
-bool streamQueueResEmpty(const SStreamQueueRes* pRes) {
-  //
-  return true;
-}
-int64_t streamQueueResSize(const SStreamQueueRes* pRes) { return pRes->size; }
-SStreamQueueNode* streamQueueResFront(SStreamQueueRes* pRes) { return pRes->head; }
-SStreamQueueNode* streamQueueResPop(SStreamQueueRes* pRes) {
-  SStreamQueueNode* pRet = pRes->head;
-  pRes->head = pRes->head->next;
-  return pRet;
-}
-
-void streamQueueResClear(SStreamQueueRes* pRes) {
-  while (pRes->head) {
-    SStreamQueueNode* pNode = pRes->head;
-    streamFreeQitem(pRes->head->item);
-    pRes->head = pNode;
-  }
-}
-
-SStreamQueueRes streamQueueBuildRes(SStreamQueueNode* pTail) {
-  int64_t           size = 0;
-  SStreamQueueNode* head = NULL;
-
-  while (pTail) {
-    SStreamQueueNode* pTmp = pTail->next;
-    pTail->next = head;
-    head = pTail;
-    pTail = pTmp;
-    size++;
-  }
-
-  return (SStreamQueueRes){.head = head, .size = size};
-}
-
-bool streamQueueHasTask(const SStreamQueue1* pQueue) { return atomic_load_ptr(pQueue->pHead); }
-int32_t streamQueuePush(SStreamQueue1* pQueue, SStreamQueueItem* pItem) {
-  SStreamQueueNode* pNode = taosMemoryMalloc(sizeof(SStreamQueueNode));
-  pNode->item = pItem;
-  SStreamQueueNode* pHead = atomic_load_ptr(pQueue->pHead);
-  while (1) {
-    pNode->next = pHead;
-    SStreamQueueNode* pOld = atomic_val_compare_exchange_ptr(pQueue->pHead, pHead, pNode);
-    if (pOld == pHead) {
-      break;
-    }
-  }
-  return 0;
-}
-
-SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue) {
-  SStreamQueueNode* pNode = atomic_exchange_ptr(pQueue->pHead, NULL);
-  if (pNode) return streamQueueBuildRes(pNode);
-  return (SStreamQueueRes){0};
-}
-#endif
-
 bool streamQueueIsFull(const STaosQueue* pQueue, bool inputQ) {
   bool isFull = taosQueueItemSize((STaosQueue*)pQueue) >= STREAM_TASK_QUEUE_CAPACITY;
   if (isFull) {
@@ -167,7 +112,7 @@ bool streamQueueIsFull(const STaosQueue* pQueue, bool inputQ) {
   }

   int32_t threahold = (inputQ) ? STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE : STREAM_TASK_OUTPUT_QUEUE_CAPACITY_IN_SIZE;
-  double  size = SIZE_IN_MB(taosQueueMemorySize((STaosQueue*)pQueue));
+  double  size = SIZE_IN_MiB(taosQueueMemorySize((STaosQueue*)pQueue));
   return (size >= threahold);
 }

@@ -178,71 +123,98 @@ int32_t streamQueueGetNumOfItems(const SStreamQueue* pQueue) {
   return numOfItems1 + numOfItems2;
 }

-int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks) {
+int32_t streamQueueGetNumOfItemsInQueue(const SStreamQueue* pQueue) {
+  return taosQueueItemSize(pQueue->pQueue);
+}
+
+int32_t streamQueueItemGetSize(const SStreamQueueItem* pItem) {
+  STaosQnode* p = (STaosQnode*)((char*)pItem - sizeof(STaosQnode));
+  return p->dataSize;
+}
+
+void streamQueueItemIncSize(const SStreamQueueItem* pItem, int32_t size) {
+  STaosQnode* p = (STaosQnode*)((char*)pItem - sizeof(STaosQnode));
+  p->dataSize += size;
+}
+
+const char* streamQueueItemGetTypeStr(int32_t type) {
+  switch (type) {
+    case STREAM_INPUT__CHECKPOINT:
+      return "checkpoint";
+    case STREAM_INPUT__CHECKPOINT_TRIGGER:
+      return "checkpoint-trigger";
+    case STREAM_INPUT__TRANS_STATE:
+      return "trans-state";
+    default:
+      return "datablock";
+  }
+}
+
+int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks,
+                                    int32_t* blockSize) {
   int32_t     retryTimes = 0;
   int32_t     MAX_RETRY_TIMES = 5;
   const char* id = pTask->id.idStr;
+  int32_t     taskLevel = pTask->info.taskLevel;

-  if (pTask->info.taskLevel == TASK_LEVEL__SINK) {  // extract block from inputQ, one-by-one
-    while (1) {
-      if (streamTaskShouldPause(&pTask->status) || streamTaskShouldStop(&pTask->status)) {
-        qDebug("s-task:%s task should pause, extract input blocks:%d", pTask->id.idStr, *numOfBlocks);
-        return TSDB_CODE_SUCCESS;
-      }
-
-      STokenBucket* pBucket = pTask->pTokenBucket;
-      if (!streamTaskHasAvailableToken(pBucket)) {  // no available token in th bucket, ignore this execution
-        // qInfo("s-task:%s no available token for sink, capacity:%d, rate:%d token/sec, quit", pTask->id.idStr,
-        // pBucket->capacity, pBucket->rate);
-        return TSDB_CODE_SUCCESS;
-      }
-
-      SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputInfo.queue);
-      if (qItem == NULL) {
-        qDebug("===stream===break batchSize:%d, %s", *numOfBlocks, id);
-        return TSDB_CODE_SUCCESS;
-      }
-
-      qDebug("s-task:%s sink task handle block one-by-one, type:%d", id, qItem->type);
-
-      *numOfBlocks = 1;
-      *pInput = qItem;
-      return TSDB_CODE_SUCCESS;
-    }
-  }
+  *pInput = NULL;
+  *numOfBlocks = 0;
+  *blockSize = 0;
+
+  // no available token in bucket for sink task, let's wait for a little bit
+  if (taskLevel == TASK_LEVEL__SINK && (!streamTaskExtractAvailableToken(pTask->pTokenBucket))) {
+    stDebug("s-task:%s no available token in bucket for sink data, wait", id);
+    return TSDB_CODE_SUCCESS;
+  }

   while (1) {
     if (streamTaskShouldPause(&pTask->status) || streamTaskShouldStop(&pTask->status)) {
-      qDebug("s-task:%s task should pause, extract input blocks:%d", pTask->id.idStr, *numOfBlocks);
+      stDebug("s-task:%s task should pause, extract input blocks:%d", pTask->id.idStr, *numOfBlocks);
       return TSDB_CODE_SUCCESS;
     }

     SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputInfo.queue);
     if (qItem == NULL) {
-      if (pTask->info.taskLevel == TASK_LEVEL__SOURCE && (++retryTimes) < MAX_RETRY_TIMES) {
+      if ((taskLevel == TASK_LEVEL__SOURCE || taskLevel == TASK_LEVEL__SINK) && (++retryTimes) < MAX_RETRY_TIMES) {
         taosMsleep(10);
-        qDebug("===stream===try again batchSize:%d, retry:%d, %s", *numOfBlocks, retryTimes, id);
         continue;
       }

-      qDebug("===stream===break batchSize:%d, %s", *numOfBlocks, id);
+      // restore the token to bucket
+      if (*numOfBlocks > 0) {
+        *blockSize = streamQueueItemGetSize(*pInput);
+        if (taskLevel == TASK_LEVEL__SINK) {
+          streamTaskConsumeQuota(pTask->pTokenBucket, *blockSize);
+        }
+      } else {
+        streamTaskPutbackToken(pTask->pTokenBucket);
+      }
+
       return TSDB_CODE_SUCCESS;
     }

     // do not merge blocks for sink node and check point data block
-    if (qItem->type == STREAM_INPUT__CHECKPOINT || qItem->type == STREAM_INPUT__CHECKPOINT_TRIGGER ||
-        qItem->type == STREAM_INPUT__TRANS_STATE) {
-      const char* p = streamGetBlockTypeStr(qItem->type);
+    int8_t type = qItem->type;
+    if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__CHECKPOINT_TRIGGER ||
+        type == STREAM_INPUT__TRANS_STATE) {
+      const char* p = streamQueueItemGetTypeStr(type);

       if (*pInput == NULL) {
-        qDebug("s-task:%s %s msg extracted, start to process immediately", id, p);
+        stDebug("s-task:%s %s msg extracted, start to process immediately", id, p);

+        // restore the token to bucket in case of checkpoint/trans-state msg
+        streamTaskPutbackToken(pTask->pTokenBucket);
+        *blockSize = 0;
         *numOfBlocks = 1;
         *pInput = qItem;
         return TSDB_CODE_SUCCESS;
-      } else {
-        // previous existed blocks needs to be handle, before handle the checkpoint msg block
-        qDebug("s-task:%s %s msg extracted, handle previous blocks, numOfBlocks:%d", id, p, *numOfBlocks);
+      } else {  // previous existed blocks needs to be handle, before handle the checkpoint msg block
+        stDebug("s-task:%s %s msg extracted, handle previous blocks, numOfBlocks:%d", id, p, *numOfBlocks);
+        *blockSize = streamQueueItemGetSize(*pInput);
+        if (taskLevel == TASK_LEVEL__SINK) {
+          streamTaskConsumeQuota(pTask->pTokenBucket, *blockSize);
+        }
+
         streamQueueProcessFail(pTask->inputInfo.queue);
         return TSDB_CODE_SUCCESS;
       }
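streamQueueItemGetSize above recovers an item's size by stepping back over the queue-node header that precedes every payload in memory. A standalone sketch of that header-before-payload layout; the struct here is illustrative, and the real STaosQnode has more fields:

```c
#include <stdint.h>
#include <stdlib.h>

// Each allocation places a node header directly before the payload handed to
// the caller, so metadata is one pointer subtraction away from any payload.
typedef struct QNodeSketch {
  struct QNodeSketch* next;
  int32_t             dataSize;
} QNodeSketch;

static void* qnodeAlloc(int32_t payloadSize) {
  QNodeSketch* node = calloc(1, sizeof(QNodeSketch) + payloadSize);
  if (node == NULL) return NULL;
  node->dataSize = payloadSize;
  return (char*)node + sizeof(QNodeSketch);  // caller sees only the payload
}

static int32_t qnodeItemSize(const void* payload) {
  const QNodeSketch* node = (const QNodeSketch*)((const char*)payload - sizeof(QNodeSketch));
  return node->dataSize;
}
```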
|
||||||
// do not merge blocks for sink node and check point data block
|
// do not merge blocks for sink node and check point data block
|
||||||
if (qItem->type == STREAM_INPUT__CHECKPOINT || qItem->type == STREAM_INPUT__CHECKPOINT_TRIGGER ||
|
int8_t type = qItem->type;
|
||||||
qItem->type == STREAM_INPUT__TRANS_STATE) {
|
if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__CHECKPOINT_TRIGGER ||
|
||||||
const char* p = streamGetBlockTypeStr(qItem->type);
|
type == STREAM_INPUT__TRANS_STATE) {
|
||||||
|
const char* p = streamQueueItemGetTypeStr(type);
|
||||||
|
|
||||||
if (*pInput == NULL) {
|
if (*pInput == NULL) {
|
||||||
qDebug("s-task:%s %s msg extracted, start to process immediately", id, p);
|
stDebug("s-task:%s %s msg extracted, start to process immediately", id, p);
|
||||||
|
|
||||||
|
// restore the token to bucket in case of checkpoint/trans-state msg
|
||||||
|
streamTaskPutbackToken(pTask->pTokenBucket);
|
||||||
|
*blockSize = 0;
|
||||||
*numOfBlocks = 1;
|
*numOfBlocks = 1;
|
||||||
*pInput = qItem;
|
*pInput = qItem;
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
} else {
|
} else { // previous existed blocks needs to be handle, before handle the checkpoint msg block
|
||||||
// previous existed blocks needs to be handle, before handle the checkpoint msg block
|
stDebug("s-task:%s %s msg extracted, handle previous blocks, numOfBlocks:%d", id, p, *numOfBlocks);
|
||||||
qDebug("s-task:%s %s msg extracted, handle previous blocks, numOfBlocks:%d", id, p, *numOfBlocks);
|
*blockSize = streamQueueItemGetSize(*pInput);
|
||||||
|
if (taskLevel == TASK_LEVEL__SINK) {
|
||||||
|
streamTaskConsumeQuota(pTask->pTokenBucket, *blockSize);
|
||||||
|
}
|
||||||
|
|
||||||
streamQueueProcessFail(pTask->inputInfo.queue);
|
streamQueueProcessFail(pTask->inputInfo.queue);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
@ -251,14 +223,19 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu
|
||||||
ASSERT((*numOfBlocks) == 0);
|
ASSERT((*numOfBlocks) == 0);
|
||||||
*pInput = qItem;
|
*pInput = qItem;
|
||||||
} else {
|
} else {
|
||||||
// todo we need to sort the data block, instead of just appending into the array list.
|
// merge current block failed, let's handle the already merged blocks.
|
||||||
void* newRet = streamMergeQueueItem(*pInput, qItem);
|
void* newRet = streamMergeQueueItem(*pInput, qItem);
|
||||||
if (newRet == NULL) {
|
if (newRet == NULL) {
|
||||||
if (terrno != 0) {
|
if (terrno != 0) {
|
||||||
qError("s-task:%s failed to merge blocks from inputQ, numOfBlocks:%d, code:%s", id, *numOfBlocks,
|
stError("s-task:%s failed to merge blocks from inputQ, numOfBlocks:%d, code:%s", id, *numOfBlocks,
|
||||||
tstrerror(terrno));
|
tstrerror(terrno));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
*blockSize = streamQueueItemGetSize(*pInput);
|
||||||
|
if (taskLevel == TASK_LEVEL__SINK) {
|
||||||
|
streamTaskConsumeQuota(pTask->pTokenBucket, *blockSize);
|
||||||
|
}
|
||||||
|
|
||||||
streamQueueProcessFail(pTask->inputInfo.queue);
|
streamQueueProcessFail(pTask->inputInfo.queue);
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
|
@ -270,7 +247,13 @@ int32_t streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu
|
||||||
streamQueueProcessSuccess(pTask->inputInfo.queue);
|
streamQueueProcessSuccess(pTask->inputInfo.queue);
|
||||||
|
|
||||||
if (*numOfBlocks >= MAX_STREAM_EXEC_BATCH_NUM) {
|
if (*numOfBlocks >= MAX_STREAM_EXEC_BATCH_NUM) {
|
||||||
qDebug("s-task:%s batch size limit:%d reached, start to process blocks", id, MAX_STREAM_EXEC_BATCH_NUM);
|
stDebug("s-task:%s batch size limit:%d reached, start to process blocks", id, MAX_STREAM_EXEC_BATCH_NUM);
|
||||||
|
|
||||||
|
*blockSize = streamQueueItemGetSize(*pInput);
|
||||||
|
if (taskLevel == TASK_LEVEL__SINK) {
|
||||||
|
streamTaskConsumeQuota(pTask->pTokenBucket, *blockSize);
|
||||||
|
}
|
||||||
|
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
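The extraction path above settles the sink task's rate limit in every exit branch: a non-empty batch charges the byte quota, while an empty result returns the pre-claimed token. A minimal standalone sketch of that claim/settle pattern follows; all names here are illustrative stand-ins, not the TDengine API.

```
#include <stddef.h>

typedef struct {
  int    numOfToken;   // one token is claimed per extraction attempt
  double bytesRemain;  // MiB quota, charged only when a batch is produced
} SBucketSketch;

// stand-ins for streamTaskPutbackToken()/streamTaskConsumeQuota()
static void putbackToken(SBucketSketch* pBucket) { pBucket->numOfToken += 1; }
static void consumeQuota(SBucketSketch* pBucket, double mib) { pBucket->bytesRemain -= mib; }

// settle the bucket after one extraction attempt: charge the byte quota
// when a non-empty batch leaves the queue, otherwise return the token
static void settleExtraction(SBucketSketch* pBucket, size_t numOfBlocks, double batchMiB) {
  if (numOfBlocks > 0) {
    consumeQuota(pBucket, batchMiB);
  } else {
    putbackToken(pBucket);
  }
}
```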
@@ -285,8 +268,8 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
   if (type == STREAM_INPUT__DATA_SUBMIT) {
     SStreamDataSubmit* px = (SStreamDataSubmit*)pItem;
     if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && streamQueueIsFull(pQueue, true)) {
-      double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
-      qTrace(
+      double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue));
+      stTrace(
           "s-task:%s inputQ is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) stop to push data",
           pTask->id.idStr, STREAM_TASK_QUEUE_CAPACITY, STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE, total, size);
       streamDataSubmitDestroy(px);
@@ -304,17 +287,17 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
       return code;
     }
 
-    double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
+    double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue));
 
     // use the local variable to avoid the pItem be freed by other threads, since it has been put into queue already.
-    qDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr,
-           msgLen, ver, total, size + SIZE_IN_MB(msgLen));
+    stDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr,
+            msgLen, ver, total, size + SIZE_IN_MiB(msgLen));
   } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE ||
              type == STREAM_INPUT__REF_DATA_BLOCK) {
     if (streamQueueIsFull(pQueue, true)) {
-      double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
+      double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue));
 
-      qTrace("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort",
+      stTrace("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort",
              pTask->id.idStr, STREAM_TASK_QUEUE_CAPACITY, STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE, total, size);
       destroyStreamDataBlock((SStreamDataBlock*)pItem);
       return -1;
@@ -326,8 +309,8 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
       return code;
     }
 
-    double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
-    qDebug("s-task:%s blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size);
+    double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue));
+    stDebug("s-task:%s blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size);
   } else if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__CHECKPOINT_TRIGGER ||
              type == STREAM_INPUT__TRANS_STATE) {
     int32_t code = taosWriteQitem(pQueue, pItem);
@@ -336,9 +319,9 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
       return code;
     }
 
-    double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
-    qDebug("s-task:%s level:%d %s blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr,
-           pTask->info.taskLevel, streamGetBlockTypeStr(type), total, size);
+    double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue));
+    stDebug("s-task:%s level:%d %s blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr,
+            pTask->info.taskLevel, streamQueueItemGetTypeStr(type), total, size);
   } else if (type == STREAM_INPUT__GET_RES) {
     // use the default memory limit, refactor later.
     int32_t code = taosWriteQitem(pQueue, pItem);
@@ -347,15 +330,15 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem)
       return code;
     }
 
-    double size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
-    qDebug("s-task:%s data res enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size);
+    double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue));
+    stDebug("s-task:%s data res enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size);
   } else {
     ASSERT(0);
   }
 
   if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->info.triggerParam != 0) {
     atomic_val_compare_exchange_8(&pTask->schedInfo.status, TASK_TRIGGER_STATUS__INACTIVE, TASK_TRIGGER_STATUS__ACTIVE);
-    qDebug("s-task:%s new data arrived, active the trigger, triggerStatus:%d", pTask->id.idStr, pTask->schedInfo.status);
+    stDebug("s-task:%s new data arrived, active the trigger, triggerStatus:%d", pTask->id.idStr, pTask->schedInfo.status);
   }
 
   return 0;
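Every enqueue branch above recomputes the queue footprint in MiB before admitting a block. A sketch of that admission check is below; the capacity constants are illustrative placeholders, while the real limits come from STREAM_TASK_QUEUE_CAPACITY and STREAM_TASK_INPUT_QUEUE_CAPACITY_IN_SIZE.

```
#include <stdbool.h>
#include <stdint.h>

// assumed stand-in for the SIZE_IN_MiB() macro: bytes -> MiB
#define SIZE_IN_MIB_SKETCH(bytes) ((bytes) / 1048576.0)

// illustrative limits; the shipped values are build-time constants
static const int32_t kCapacityItems = 30000;
static const double  kCapacityMiB = 300.0;

// a block is admitted only while both the item count and the memory
// footprint stay below capacity, mirroring streamQueueIsFull()
static bool inputQueueHasRoom(int32_t numOfItems, int64_t memBytes) {
  return (numOfItems < kCapacityItems) && (SIZE_IN_MIB_SKETCH(memBytes) < kCapacityMiB);
}
```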
@@ -367,14 +350,14 @@ int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBloc
 
   while (streamQueueIsFull(pQueue, false)) {
     if (streamTaskShouldStop(&pTask->status)) {
-      qInfo("s-task:%s discard result block due to task stop", pTask->id.idStr);
+      stInfo("s-task:%s discard result block due to task stop", pTask->id.idStr);
       return TSDB_CODE_STREAM_EXEC_CANCELLED;
     }
 
     int32_t total = streamQueueGetNumOfItems(pTask->outputInfo.queue);
-    double  size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
+    double  size = SIZE_IN_MiB(taosQueueMemorySize(pQueue));
     // let's wait for there are enough space to hold this result pBlock
-    qDebug("s-task:%s outputQ is full, wait for 500ms and retry, outputQ items:%d, size:%.2fMiB", pTask->id.idStr,
+    stDebug("s-task:%s outputQ is full, wait for 500ms and retry, outputQ items:%d, size:%.2fMiB", pTask->id.idStr,
            total, size);
     taosMsleep(500);
   }
@@ -382,54 +365,79 @@ int32_t streamTaskPutDataIntoOutputQ(SStreamTask* pTask, SStreamDataBlock* pBloc
   int32_t code = taosWriteQitem(pQueue, pBlock);
 
   int32_t total = streamQueueGetNumOfItems(pTask->outputInfo.queue);
-  double  size = SIZE_IN_MB(taosQueueMemorySize(pQueue));
+  double  size = SIZE_IN_MiB(taosQueueMemorySize(pQueue));
   if (code != 0) {
-    qError("s-task:%s failed to put res into outputQ, outputQ items:%d, size:%.2fMiB code:%s, result lost",
+    stError("s-task:%s failed to put res into outputQ, outputQ items:%d, size:%.2fMiB code:%s, result lost",
            pTask->id.idStr, total + 1, size, tstrerror(code));
   } else {
-    qDebug("s-task:%s data put into outputQ, outputQ items:%d, size:%.2fMiB", pTask->id.idStr, total, size);
+    stDebug("s-task:%s data put into outputQ, outputQ items:%d, size:%.2fMiB", pTask->id.idStr, total, size);
   }
 
   return TSDB_CODE_SUCCESS;
 }
 
-int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t cap, int32_t rate) {
-  if (cap < 50 || rate < 50 || pBucket == NULL) {
-    qError("failed to init sink task bucket, cap:%d, rate:%d", cap, rate);
+int32_t streamTaskInitTokenBucket(STokenBucket* pBucket, int32_t numCap, int32_t numRate, int32_t bytesRate) {
+  if (numCap < 10 || numRate < 10 || pBucket == NULL) {
+    stError("failed to init sink task bucket, cap:%d, rate:%d", numCap, numRate);
     return TSDB_CODE_INVALID_PARA;
   }
 
-  pBucket->capacity = cap;
-  pBucket->rate = rate;
-  pBucket->numOfToken = cap;
+  pBucket->numCapacity = numCap;
+  pBucket->numOfToken = numCap;
+  pBucket->numRate = numRate;
+
+  pBucket->bytesRate = bytesRate;
+  pBucket->bytesCapacity = bytesRate * MAX_SMOOTH_BURST_RATIO;
+  pBucket->bytesRemain = pBucket->bytesCapacity;
+
   pBucket->fillTimestamp = taosGetTimestampMs();
   return TSDB_CODE_SUCCESS;
 }
 
-static void fillBucket(STokenBucket* pBucket) {
+static void fillTokenBucket(STokenBucket* pBucket) {
   int64_t now = taosGetTimestampMs();
   int64_t delta = now - pBucket->fillTimestamp;
   ASSERT(pBucket->numOfToken >= 0);
 
-  int32_t inc = (delta / 1000.0) * pBucket->rate;
-  if (inc > 0) {
-    if ((pBucket->numOfToken + inc) < pBucket->capacity) {
-      pBucket->numOfToken += inc;
-    } else {
-      pBucket->numOfToken = pBucket->capacity;
-    }
-
+  int32_t incNum = (delta / 1000.0) * pBucket->numRate;
+  if (incNum > 0) {
+    pBucket->numOfToken = TMIN(pBucket->numOfToken + incNum, pBucket->numCapacity);
     pBucket->fillTimestamp = now;
-    qDebug("new token available, current:%d, inc:%d ts:%"PRId64, pBucket->numOfToken, inc, now);
   }
+
+  // increase the new available quota as time goes on
+  double incSize = (delta / 1000.0) * pBucket->bytesRate;
+  if (incSize > 0) {
+    pBucket->bytesRemain = TMIN(pBucket->bytesRemain + incSize, pBucket->bytesCapacity);
+  }
+
+  if (incNum > 0) {
+    stDebug("new token and capacity available, current token:%d inc:%d, current quota:%.2fMiB inc:%.2fMiB, ts:%" PRId64
+            " wait for %.2f Sec",
+            pBucket->numOfToken, incNum, pBucket->bytesRemain, incSize, now, delta / 1000.0);
+  }
 }
 
-bool streamTaskHasAvailableToken(STokenBucket* pBucket) {
-  fillBucket(pBucket);
+bool streamTaskExtractAvailableToken(STokenBucket* pBucket) {
+  fillTokenBucket(pBucket);
+
   if (pBucket->numOfToken > 0) {
-    --pBucket->numOfToken;
-    return true;
+    if (pBucket->bytesRemain > 0) {
+      pBucket->numOfToken -= 1;
+      return true;
+    } else {  // no available size quota now
+      return false;
+    }
   } else {
     return false;
   }
 }
+
+void streamTaskPutbackToken(STokenBucket* pBucket) {
+  pBucket->numOfToken = TMIN(pBucket->numOfToken + 1, pBucket->numCapacity);
+}
+
+// size in KB
+void streamTaskConsumeQuota(STokenBucket* pBucket, int32_t bytes) {
+  pBucket->bytesRemain -= SIZE_IN_MiB(bytes);
+}
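The rewritten bucket throttles a sink task on two axes at once: a block-count token refilled at numRate and a byte quota refilled at bytesRate, each capped at its capacity. A self-contained sketch of the same refill/extract logic follows; the wall-clock source and all names are illustrative, not the actual TDengine implementation.

```
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define TMIN_SKETCH(a, b) (((a) < (b)) ? (a) : (b))

typedef struct {
  int32_t numOfToken, numCapacity, numRate;       // blocks per second
  double  bytesRemain, bytesCapacity, bytesRate;  // MiB per second
  int64_t fillTimestamp;                          // last refill, in ms
} STokenBucketSketch;

static int64_t nowMs(void) { return (int64_t)time(NULL) * 1000; }

// refill both dimensions from elapsed time, capping at capacity
static void fillBucketSketch(STokenBucketSketch* pBucket) {
  int64_t now = nowMs();
  double  sec = (now - pBucket->fillTimestamp) / 1000.0;

  int32_t incNum = (int32_t)(sec * pBucket->numRate);
  if (incNum > 0) {
    pBucket->numOfToken = TMIN_SKETCH(pBucket->numOfToken + incNum, pBucket->numCapacity);
    pBucket->fillTimestamp = now;
  }

  double incSize = sec * pBucket->bytesRate;
  if (incSize > 0) {
    pBucket->bytesRemain = TMIN_SKETCH(pBucket->bytesRemain + incSize, pBucket->bytesCapacity);
  }
}

// a block may pass only if a token AND byte quota are both available
static bool extractTokenSketch(STokenBucketSketch* pBucket) {
  fillBucketSketch(pBucket);
  if (pBucket->numOfToken > 0 && pBucket->bytesRemain > 0) {
    pBucket->numOfToken -= 1;
    return true;
  }
  return false;
}
```

Splitting the limit this way lets many small blocks be paced by the token count while a few oversized blocks are paced by the byte quota.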
@@ -13,25 +13,35 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <tstream.h>
 #include "streamInt.h"
 #include "trpc.h"
 #include "ttimer.h"
 #include "wal.h"
 
-typedef struct SStreamTaskRetryInfo {
+typedef struct SLaunchHTaskInfo {
   SStreamMeta* pMeta;
   STaskId      id;
-} SStreamTaskRetryInfo;
+} SLaunchHTaskInfo;
+
+typedef struct STaskRecheckInfo {
+  SStreamTask*        pTask;
+  SStreamTaskCheckReq req;
+  void*               checkTimer;
+} STaskRecheckInfo;
 
 static int32_t streamSetParamForScanHistory(SStreamTask* pTask);
 static void    streamTaskSetRangeStreamCalc(SStreamTask* pTask);
 static int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated);
+static SLaunchHTaskInfo* createHTaskLaunchInfo(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
+static void              tryLaunchHistoryTask(void* param, void* tmrId);
 
 static void streamTaskSetReady(SStreamTask* pTask, int32_t numOfReqs) {
+  SStreamMeta* pMeta = pTask->pMeta;
+  int32_t      vgId = pMeta->vgId;
+
   if (pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY && pTask->info.taskLevel != TASK_LEVEL__SOURCE) {
     pTask->numOfWaitingUpstream = taosArrayGetSize(pTask->pUpstreamInfoList);
-    qDebug("s-task:%s level:%d task wait for %d upstream tasks complete scan-history procedure, status:%s",
+    stDebug("s-task:%s level:%d task wait for %d upstream tasks complete scan-history procedure, status:%s",
            pTask->id.idStr, pTask->info.taskLevel, pTask->numOfWaitingUpstream,
            streamGetTaskStatusStr(pTask->status.taskStatus));
   }
@@ -39,9 +49,27 @@ static void streamTaskSetReady(SStreamTask* pTask, int32_t numOfReqs) {
   ASSERT(pTask->status.downstreamReady == 0);
   pTask->status.downstreamReady = 1;
 
-  int64_t el = (taosGetTimestampMs() - pTask->taskExecInfo.init);
-  qDebug("s-task:%s all %d downstream ready, init completed, elapsed time:%"PRId64"ms, task status:%s",
+  pTask->execInfo.start = taosGetTimestampMs();
+  int64_t el = (pTask->execInfo.start - pTask->execInfo.init);
+  stDebug("s-task:%s all %d downstream ready, init completed, elapsed time:%" PRId64 "ms, task status:%s",
          pTask->id.idStr, numOfReqs, el, streamGetTaskStatusStr(pTask->status.taskStatus));
+
+  taosWLockLatch(&pMeta->lock);
+
+  STaskId id = streamTaskExtractKey(pTask);
+  taosHashPut(pMeta->startInfo.pReadyTaskSet, &id, sizeof(id), NULL, 0);
+  int32_t numOfTotal = streamMetaGetNumOfTasks(pMeta);
+
+  if (taosHashGetSize(pMeta->startInfo.pReadyTaskSet) == numOfTotal) {
+    // reset value for next time start
+    taosHashClear(pMeta->startInfo.pReadyTaskSet);
+    pMeta->startInfo.startedAfterNodeUpdate = 0;
+    pMeta->startInfo.elapsedTime = pTask->execInfo.start - pMeta->startInfo.ts;
+
+    stDebug("vgId:%d all %d task(s) are started successfully, last ready task:%s level:%d, total elapsed time:%.2f sec",
+            vgId, numOfTotal, pTask->id.idStr, pTask->info.taskLevel, pMeta->startInfo.elapsedTime / 1000.0);
+  }
+  taosWUnLockLatch(&pMeta->lock);
 }
 
 int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated) {
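streamTaskSetReady now records each ready task in pReadyTaskSet under the meta lock and, once the count reaches the vgroup total, clears the set and stamps the total elapsed time. A reduced sketch of that last-one-closes-the-door bookkeeping is below; types are simplified and a plain counter stands in for the hash set.

```
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  int32_t readyCount;   // stands in for taosHashGetSize(pReadyTaskSet)
  int32_t numOfTotal;   // stands in for streamMetaGetNumOfTasks()
  int64_t ts;           // when the vgroup start began
  int64_t elapsedTime;  // filled by the last task to become ready
} SStartInfoSketch;

// caller is assumed to hold the meta lock; returns true when this task
// completed the whole vgroup start, mirroring the branch above
static bool markTaskReadySketch(SStartInfoSketch* pInfo, int64_t taskStartTs) {
  pInfo->readyCount += 1;
  if (pInfo->readyCount == pInfo->numOfTotal) {
    pInfo->elapsedTime = taskStartTs - pInfo->ts;
    pInfo->readyCount = 0;  // reset for the next start cycle
    return true;
  }
  return false;
}
```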
@@ -64,20 +92,6 @@ int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated) {
   return 0;
 }
 
-const char* streamGetTaskStatusStr(int32_t status) {
-  switch(status) {
-    case TASK_STATUS__NORMAL: return "normal";
-    case TASK_STATUS__SCAN_HISTORY: return "scan-history";
-    case TASK_STATUS__HALT: return "halt";
-    case TASK_STATUS__PAUSE: return "paused";
-    case TASK_STATUS__CK: return "check-point";
-    case TASK_STATUS__DROPPING: return "dropping";
-    case TASK_STATUS__STOP: return "stop";
-    case TASK_STATUS__UNINIT: return "uninitialized";
-    default:return "";
-  }
-}
-
 static int32_t doLaunchScanHistoryTask(SStreamTask* pTask) {
   SVersionRange* pRange = &pTask->dataRange.range;
   if (pTask->info.fillHistory) {
@@ -95,7 +109,7 @@ int32_t streamTaskLaunchScanHistory(SStreamTask* pTask) {
       return doLaunchScanHistoryTask(pTask);
     } else {
       ASSERT(pTask->status.taskStatus == TASK_STATUS__NORMAL);
-      qDebug("s-task:%s no need to scan-history-data, status:%s, sched-status:%d, ver:%" PRId64, pTask->id.idStr,
+      stDebug("s-task:%s no need to scan-history-data, status:%s, sched-status:%d, ver:%" PRId64, pTask->id.idStr,
              streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus,
              walReaderGetCurrentVer(pTask->exec.pWalReader));
     }
@@ -105,7 +119,7 @@ int32_t streamTaskLaunchScanHistory(SStreamTask* pTask) {
       streamTaskEnablePause(pTask);
     }
   } else if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
-    qDebug("s-task:%s sink task do nothing to handle scan-history", pTask->id.idStr);
+    stDebug("s-task:%s sink task do nothing to handle scan-history", pTask->id.idStr);
   }
   return 0;
 }
@@ -126,16 +140,16 @@ static int32_t doCheckDownstreamStatus(SStreamTask* pTask) {
   // serialize streamProcessScanHistoryFinishRsp
   if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
     req.reqId = tGenIdPI64();
-    req.downstreamNodeId = pTask->fixedEpDispatcher.nodeId;
-    req.downstreamTaskId = pTask->fixedEpDispatcher.taskId;
+    req.downstreamNodeId = pTask->fixedDispatcher.nodeId;
+    req.downstreamTaskId = pTask->fixedDispatcher.taskId;
     pTask->checkReqId = req.reqId;
 
-    qDebug("s-task:%s check single downstream task:0x%x(vgId:%d) ver:%" PRId64 "-%" PRId64 " window:%" PRId64
-           "-%" PRId64 ", stage:%"PRId64" req:0x%" PRIx64,
-           pTask->id.idStr, req.downstreamTaskId, req.downstreamNodeId, pRange->range.minVer, pRange->range.maxVer,
-           pWindow->skey, pWindow->ekey, req.stage, req.reqId);
+    stDebug("s-task:%s stage:%" PRId64 " check single downstream task:0x%x(vgId:%d) ver:%" PRId64 "-%" PRId64
+            " window:%" PRId64 "-%" PRId64 " req:0x%" PRIx64,
+            pTask->id.idStr, req.reqId, req.downstreamTaskId, req.downstreamNodeId, pRange->range.minVer,
+            pRange->range.maxVer, pWindow->skey, pWindow->ekey, req.reqId);
 
-    streamDispatchCheckMsg(pTask, &req, pTask->fixedEpDispatcher.nodeId, &pTask->fixedEpDispatcher.epSet);
+    streamSendCheckMsg(pTask, &req, pTask->fixedDispatcher.nodeId, &pTask->fixedDispatcher.epSet);
   } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
     SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
 
@@ -143,7 +157,7 @@ static int32_t doCheckDownstreamStatus(SStreamTask* pTask) {
     pTask->notReadyTasks = numOfVgs;
     pTask->checkReqIds = taosArrayInit(numOfVgs, sizeof(int64_t));
 
-    qDebug("s-task:%s check %d downstream tasks, ver:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64,
+    stDebug("s-task:%s check %d downstream tasks, ver:%" PRId64 "-%" PRId64 " window:%" PRId64 "-%" PRId64,
            pTask->id.idStr, numOfVgs, pRange->range.minVer, pRange->range.maxVer, pWindow->skey, pWindow->ekey);
 
     for (int32_t i = 0; i < numOfVgs; i++) {
@@ -152,12 +166,12 @@ static int32_t doCheckDownstreamStatus(SStreamTask* pTask) {
       taosArrayPush(pTask->checkReqIds, &req.reqId);
       req.downstreamNodeId = pVgInfo->vgId;
       req.downstreamTaskId = pVgInfo->taskId;
-      qDebug("s-task:%s (vgId:%d) check downstream task:0x%x (vgId:%d) (shuffle), idx:%d, stage:%" PRId64,
-             pTask->id.idStr, pTask->info.nodeId, req.downstreamTaskId, req.downstreamNodeId, i, req.stage);
-      streamDispatchCheckMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet);
+      stDebug("s-task:%s (vgId:%d) stage:%" PRId64 " check downstream task:0x%x (vgId:%d) (shuffle), idx:%d",
+              pTask->id.idStr, pTask->info.nodeId, req.stage, req.downstreamTaskId, req.downstreamNodeId, i);
+      streamSendCheckMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet);
     }
   } else {
-    qDebug("s-task:%s (vgId:%d) set downstream ready, since no downstream", pTask->id.idStr, pTask->info.nodeId);
+    stDebug("s-task:%s (vgId:%d) set downstream ready, since no downstream", pTask->id.idStr, pTask->info.nodeId);
 
     streamTaskSetReady(pTask, 0);
     streamTaskSetRangeStreamCalc(pTask);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t streamRecheckDownstream(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp) {
|
static STaskRecheckInfo* createRecheckInfo(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp) {
|
||||||
SStreamTaskCheckReq req = {
|
STaskRecheckInfo* pInfo = taosMemoryCalloc(1, sizeof(STaskRecheckInfo));
|
||||||
|
if (pInfo == NULL) {
|
||||||
|
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
pInfo->pTask = pTask;
|
||||||
|
pInfo->req = (SStreamTaskCheckReq){
|
||||||
.reqId = pRsp->reqId,
|
.reqId = pRsp->reqId,
|
||||||
.streamId = pRsp->streamId,
|
.streamId = pRsp->streamId,
|
||||||
.upstreamTaskId = pRsp->upstreamTaskId,
|
.upstreamTaskId = pRsp->upstreamTaskId,
|
||||||
|
@ -180,25 +201,44 @@ int32_t streamRecheckDownstream(SStreamTask* pTask, const SStreamTaskCheckRsp* p
|
||||||
.stage = pTask->pMeta->stage,
|
.stage = pTask->pMeta->stage,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
return pInfo;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void destroyRecheckInfo(STaskRecheckInfo* pInfo) {
|
||||||
|
if (pInfo != NULL) {
|
||||||
|
taosTmrStop(pInfo->checkTimer);
|
||||||
|
pInfo->checkTimer = NULL;
|
||||||
|
taosMemoryFree(pInfo);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void recheckDownstreamTasks(void* param, void* tmrId) {
|
||||||
|
STaskRecheckInfo* pInfo = param;
|
||||||
|
SStreamTask* pTask = pInfo->pTask;
|
||||||
|
|
||||||
|
SStreamTaskCheckReq* pReq = &pInfo->req;
|
||||||
|
|
||||||
if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
|
if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
|
||||||
qDebug("s-task:%s (vgId:%d) check downstream task:0x%x (vgId:%d) stage:%" PRId64 " (recheck)", pTask->id.idStr,
|
stDebug("s-task:%s (vgId:%d) check downstream task:0x%x (vgId:%d) stage:%" PRId64 " (recheck)", pTask->id.idStr,
|
||||||
pTask->info.nodeId, req.downstreamTaskId, req.downstreamNodeId, req.stage);
|
pTask->info.nodeId, pReq->downstreamTaskId, pReq->downstreamNodeId, pReq->stage);
|
||||||
streamDispatchCheckMsg(pTask, &req, pRsp->downstreamNodeId, &pTask->fixedEpDispatcher.epSet);
|
streamSendCheckMsg(pTask, pReq, pReq->downstreamNodeId, &pTask->fixedDispatcher.epSet);
|
||||||
} else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
|
} else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
|
||||||
SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
|
SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
|
||||||
|
|
||||||
int32_t numOfVgs = taosArrayGetSize(vgInfo);
|
int32_t numOfVgs = taosArrayGetSize(vgInfo);
|
||||||
for (int32_t i = 0; i < numOfVgs; i++) {
|
for (int32_t i = 0; i < numOfVgs; i++) {
|
||||||
SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
|
SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
|
||||||
if (pVgInfo->taskId == req.downstreamTaskId) {
|
if (pVgInfo->taskId == pReq->downstreamTaskId) {
|
||||||
qDebug("s-task:%s (vgId:%d) check downstream task:0x%x (vgId:%d) stage:%" PRId64 " (recheck)", pTask->id.idStr,
|
stDebug("s-task:%s (vgId:%d) check downstream task:0x%x (vgId:%d) stage:%" PRId64 " (recheck)", pTask->id.idStr,
|
||||||
pTask->info.nodeId, req.downstreamTaskId, req.downstreamNodeId, req.stage);
|
pTask->info.nodeId, pReq->downstreamTaskId, pReq->downstreamNodeId, pReq->stage);
|
||||||
streamDispatchCheckMsg(pTask, &req, pRsp->downstreamNodeId, &pVgInfo->epSet);
|
streamSendCheckMsg(pTask, pReq, pReq->downstreamNodeId, &pVgInfo->epSet);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
destroyRecheckInfo(pInfo);
|
||||||
|
int8_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
|
||||||
|
stDebug("s-task:%s complete send check in timer, ref:%d", pTask->id.idStr, ref);
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage) {
|
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage) {
|
||||||
|
@ -207,23 +247,30 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_
|
||||||
|
|
||||||
const char* id = pTask->id.idStr;
|
const char* id = pTask->id.idStr;
|
||||||
if (stage == -1) {
|
if (stage == -1) {
|
||||||
qDebug("s-task:%s receive check msg from upstream task:0x%x, invalid stageId:%" PRId64 ", not ready", id,
|
stDebug("s-task:%s receive check msg from upstream task:0x%x(vgId:%d), invalid stageId:%" PRId64 ", not ready", id,
|
||||||
upstreamTaskId, stage);
|
upstreamTaskId, vgId, stage);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pInfo->stage == -1) {
|
if (pInfo->stage == -1) {
|
||||||
pInfo->stage = stage;
|
pInfo->stage = stage;
|
||||||
qDebug("s-task:%s receive check msg from upstream task:0x%x for the time, init stage value:%" PRId64, id,
|
stDebug("s-task:%s receive check msg from upstream task:0x%x(vgId:%d) first time, init stage value:%" PRId64, id,
|
||||||
upstreamTaskId, stage);
|
upstreamTaskId, vgId, stage);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pInfo->stage < stage) {
|
if (pInfo->stage < stage) {
|
||||||
qError("s-task:%s receive msg from upstream task:0x%x(vgId:%d), new stage received:%" PRId64 ", prev:%" PRId64,
|
stError("s-task:%s receive check msg from upstream task:0x%x(vgId:%d), new stage received:%" PRId64
|
||||||
|
", prev:%" PRId64,
|
||||||
id, upstreamTaskId, vgId, stage, pInfo->stage);
|
id, upstreamTaskId, vgId, stage, pInfo->stage);
|
||||||
}
|
}
|
||||||
|
|
||||||
return ((pTask->status.downstreamReady == 1) && (pInfo->stage == stage))? 1:0;
|
if (pInfo->stage != stage) {
|
||||||
|
return TASK_SELF_NEW_STAGE;
|
||||||
|
} else if (pTask->status.downstreamReady != 1) {
|
||||||
|
return TASK_DOWNSTREAM_NOT_READY;
|
||||||
|
} else {
|
||||||
|
return TASK_DOWNSTREAM_READY;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void doProcessDownstreamReadyRsp(SStreamTask* pTask, int32_t numOfReqs) {
|
static void doProcessDownstreamReadyRsp(SStreamTask* pTask, int32_t numOfReqs) {
|
||||||
|
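streamTaskCheckStatus now reports a tri-state result instead of the old 1/0, so the upstream can tell a stage mismatch (a restarted vnode or moved leader) apart from a merely unready downstream. A compact sketch of that decision order is below; the enum values are assumed for illustration.

```
#include <stdint.h>

typedef enum {
  CHECK_DOWNSTREAM_READY = 0,  // values assumed for illustration
  CHECK_DOWNSTREAM_NOT_READY = 1,
  CHECK_SELF_NEW_STAGE = 2,
} ECheckResultSketch;

// a stage mismatch outranks readiness: it signals a restarted or
// transferred upstream and requires a rollback rather than a retry
static ECheckResultSketch checkStatusSketch(int64_t knownStage, int64_t reqStage, int8_t downstreamReady) {
  if (knownStage != reqStage) return CHECK_SELF_NEW_STAGE;
  if (downstreamReady != 1) return CHECK_DOWNSTREAM_NOT_READY;
  return CHECK_DOWNSTREAM_READY;
}
```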
@@ -237,15 +284,15 @@ static void doProcessDownstreamReadyRsp(SStreamTask* pTask, int32_t numOfReqs) {
   streamTaskSetRangeStreamCalc(pTask);
 
   if (status == TASK_STATUS__SCAN_HISTORY) {
-    qDebug("s-task:%s enter into scan-history data stage, status:%s", id, str);
+    stDebug("s-task:%s enter into scan-history data stage, status:%s", id, str);
     streamTaskLaunchScanHistory(pTask);
   } else {
     if (pTask->info.fillHistory == 1) {
-      qDebug("s-task:%s fill-history is set normal when start it, try to remove it,set it task to be dropping", id);
+      stDebug("s-task:%s fill-history is set normal when start it, try to remove it,set it task to be dropping", id);
       pTask->status.taskStatus = TASK_STATUS__DROPPING;
-      ASSERT(pTask->historyTaskId.taskId == 0);
+      ASSERT(pTask->hTaskInfo.id.taskId == 0);
     } else {
-      qDebug("s-task:%s downstream tasks are ready, now ready for data from wal, status:%s", id, str);
+      stDebug("s-task:%s downstream tasks are ready, now ready for data from wal, status:%s", id, str);
       streamTaskEnablePause(pTask);
     }
   }
@@ -259,7 +306,12 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
   ASSERT(pTask->id.taskId == pRsp->upstreamTaskId);
   const char* id = pTask->id.idStr;
 
-  if (pRsp->status == 1) {
+  if (streamTaskShouldStop(&pTask->status)) {
+    stDebug("s-task:%s should stop, do not do check downstream again", id);
+    return TSDB_CODE_SUCCESS;
+  }
+
+  if (pRsp->status == TASK_DOWNSTREAM_READY) {
     if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
       bool found = false;
@@ -286,7 +338,7 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
         doProcessDownstreamReadyRsp(pTask, numOfReqs);
       } else {
         int32_t total = taosArrayGetSize(pTask->shuffleDispatcher.dbInfo.pVgroupInfos);
-        qDebug("s-task:%s (vgId:%d) recv check rsp from task:0x%x (vgId:%d) status:%d, total:%d not ready:%d", id,
+        stDebug("s-task:%s (vgId:%d) recv check rsp from task:0x%x (vgId:%d) status:%d, total:%d not ready:%d", id,
                pRsp->upstreamNodeId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->status, total, left);
       }
     } else {
@@ -298,10 +350,25 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRs
       doProcessDownstreamReadyRsp(pTask, 1);
     }
   } else {  // not ready, wait for 100ms and retry
-    qDebug("s-task:%s downstream taskId:0x%x (vgId:%d) not ready, stage:%d, wait for 100ms and retry", id,
-           pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->oldStage);
-    taosMsleep(100);
-    streamRecheckDownstream(pTask, pRsp);
+    if (pRsp->status == TASK_DOWNSTREAM_NOT_LEADER) {
+      stError(
+          "s-task:%s downstream taskId:0x%x (vgId:%d) vnode-transfer/leader-change detected, not send check again, "
+          "roll-back needed",
+          id, pRsp->downstreamTaskId, pRsp->downstreamNodeId);
+    } else if (pRsp->status == TASK_SELF_NEW_STAGE) {
+      stError(
+          "s-task:%s vnode-transfer/leader-change/restart detected, old stage:%d, current stage:%d, not send check "
+          "again, roll-back needed",
+          id, pRsp->oldStage, (int32_t)pTask->pMeta->stage);
+    } else {
+      STaskRecheckInfo* pInfo = createRecheckInfo(pTask, pRsp);
+
+      int8_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
+      stDebug("s-task:%s downstream taskId:0x%x (vgId:%d) not ready, stage:%d, retry in 100ms, ref:%d ", id,
+              pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->oldStage, ref);
+
+      taosTmrReset(recheckDownstreamTasks, CHECK_DOWNSTREAM_INTERVAL, pInfo, streamEnv.timer, &pInfo->checkTimer);
+    }
   }
 
   return 0;
@@ -315,7 +382,7 @@ int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq*
 
   tEncodeSize(tEncodeStreamTaskCheckRsp, pRsp, len, code);
   if (code < 0) {
-    qError("vgId:%d failed to encode task check rsp, s-task:0x%x", pMeta->vgId, taskId);
+    stError("vgId:%d failed to encode task check rsp, s-task:0x%x", pMeta->vgId, taskId);
     return -1;
   }
@@ -335,22 +402,22 @@ int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq*
 
 // common
 int32_t streamSetParamForScanHistory(SStreamTask* pTask) {
-  qDebug("s-task:%s set operator option for scan-history data", pTask->id.idStr);
+  stDebug("s-task:%s set operator option for scan-history data", pTask->id.idStr);
   return qSetStreamOperatorOptionForScanHistory(pTask->exec.pExecutor);
 }
 
 int32_t streamRestoreParam(SStreamTask* pTask) {
-  qDebug("s-task:%s restore operator param after scan-history", pTask->id.idStr);
+  stDebug("s-task:%s restore operator param after scan-history", pTask->id.idStr);
   return qRestoreStreamOperatorOption(pTask->exec.pExecutor);
 }
 
 int32_t streamSetStatusNormal(SStreamTask* pTask) {
   int32_t status = atomic_load_8(&pTask->status.taskStatus);
   if (status == TASK_STATUS__DROPPING) {
-    qError("s-task:%s cannot be set normal, since in dropping state", pTask->id.idStr);
+    stError("s-task:%s cannot be set normal, since in dropping state", pTask->id.idStr);
     return -1;
   } else {
-    qDebug("s-task:%s set task status to be normal, prev:%s", pTask->id.idStr, streamGetTaskStatusStr(status));
+    stDebug("s-task:%s set task status to be normal, prev:%s", pTask->id.idStr, streamGetTaskStatusStr(status));
     atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__NORMAL);
     return 0;
   }
@@ -359,10 +426,10 @@ int32_t streamSetStatusNormal(SStreamTask* pTask) {
 int32_t streamSetStatusUnint(SStreamTask* pTask) {
   int32_t status = atomic_load_8(&pTask->status.taskStatus);
   if (status == TASK_STATUS__DROPPING) {
-    qError("s-task:%s cannot be set uninit, since in dropping state", pTask->id.idStr);
+    stError("s-task:%s cannot be set uninit, since in dropping state", pTask->id.idStr);
     return -1;
   } else {
-    qDebug("s-task:%s set task status to be uninit, prev:%s", pTask->id.idStr, streamGetTaskStatusStr(status));
+    stDebug("s-task:%s set task status to be uninit, prev:%s", pTask->id.idStr, streamGetTaskStatusStr(status));
     atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__UNINIT);
     return 0;
   }
|
||||||
ASSERT(taskLevel == TASK_LEVEL__AGG || taskLevel == TASK_LEVEL__SINK);
|
ASSERT(taskLevel == TASK_LEVEL__AGG || taskLevel == TASK_LEVEL__SINK);
|
||||||
|
|
||||||
if (pTask->status.taskStatus != TASK_STATUS__SCAN_HISTORY) {
|
if (pTask->status.taskStatus != TASK_STATUS__SCAN_HISTORY) {
|
||||||
qError("s-task:%s not in scan-history status, status:%s return upstream:0x%x scan-history finish directly",
|
stError("s-task:%s not in scan-history status, status:%s return upstream:0x%x scan-history finish directly",
|
||||||
pTask->id.idStr, streamGetTaskStatusStr(pTask->status.taskStatus), pReq->upstreamTaskId);
|
pTask->id.idStr, streamGetTaskStatusStr(pTask->status.taskStatus), pReq->upstreamTaskId);
|
||||||
|
|
||||||
void* pBuf = NULL;
|
void* pBuf = NULL;
|
||||||
|
@ -445,7 +512,7 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory
|
||||||
initRpcMsg(&msg, 0, pBuf, sizeof(SMsgHead) + len);
|
initRpcMsg(&msg, 0, pBuf, sizeof(SMsgHead) + len);
|
||||||
|
|
||||||
tmsgSendRsp(&msg);
|
tmsgSendRsp(&msg);
|
||||||
qDebug("s-task:%s level:%d notify upstream:0x%x(vgId:%d) to continue process data in WAL", pTask->id.idStr,
|
stDebug("s-task:%s level:%d notify upstream:0x%x(vgId:%d) to continue process data in WAL", pTask->id.idStr,
|
||||||
pTask->info.taskLevel, pReq->upstreamTaskId, pReq->upstreamNodeId);
|
pTask->info.taskLevel, pReq->upstreamTaskId, pReq->upstreamNodeId);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -458,7 +525,7 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory
|
||||||
|
|
||||||
if (left == 0) {
|
if (left == 0) {
|
||||||
int32_t numOfTasks = taosArrayGetSize(pTask->pUpstreamInfoList);
|
int32_t numOfTasks = taosArrayGetSize(pTask->pUpstreamInfoList);
|
||||||
qDebug(
|
stDebug(
|
||||||
"s-task:%s all %d upstream tasks finish scan-history data, set param for agg task for stream data and send "
|
"s-task:%s all %d upstream tasks finish scan-history data, set param for agg task for stream data and send "
|
||||||
"rsp to all upstream tasks",
|
"rsp to all upstream tasks",
|
||||||
pTask->id.idStr, numOfTasks);
|
pTask->id.idStr, numOfTasks);
|
||||||
|
@ -481,7 +548,7 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
qDebug("s-task:%s receive scan-history data finish msg from upstream:0x%x(index:%d), unfinished:%d",
|
stDebug("s-task:%s receive scan-history data finish msg from upstream:0x%x(index:%d), unfinished:%d",
|
||||||
pTask->id.idStr, pReq->upstreamTaskId, pReq->childId, left);
|
pTask->id.idStr, pReq->upstreamTaskId, pReq->childId, left);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -518,12 +585,12 @@ static void checkFillhistoryTaskStatus(SStreamTask* pTask, SStreamTask* pHTask)
   pHTask->dataRange.range.maxVer = pTask->chkInfo.nextProcessVer - 1;
 
   if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
-    qDebug("s-task:%s set the launch condition for fill-history s-task:%s, window:%" PRId64 " - %" PRId64
+    stDebug("s-task:%s set the launch condition for fill-history s-task:%s, window:%" PRId64 " - %" PRId64
            " ver range:%" PRId64 " - %" PRId64,
            pTask->id.idStr, pHTask->id.idStr, pHTask->dataRange.window.skey, pHTask->dataRange.window.ekey,
            pHTask->dataRange.range.minVer, pHTask->dataRange.range.maxVer);
   } else {
-    qDebug("s-task:%s no fill history condition for non-source task:%s", pTask->id.idStr, pHTask->id.idStr);
+    stDebug("s-task:%s no fill history condition for non-source task:%s", pTask->id.idStr, pHTask->id.idStr);
   }
 
   // check if downstream tasks have been ready
@@ -531,11 +598,9 @@ static void checkFillhistoryTaskStatus(SStreamTask* pTask, SStreamTask* pHTask)
 }
 
 static void tryLaunchHistoryTask(void* param, void* tmrId) {
-  SStreamTaskRetryInfo* pInfo = param;
+  SLaunchHTaskInfo* pInfo = param;
   SStreamMeta* pMeta = pInfo->pMeta;
 
-  qDebug("s-task:0x%x in timer to launch related history task", (int32_t) pInfo->id.taskId);
-
   taosWLockLatch(&pMeta->lock);
   SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pInfo->id, sizeof(pInfo->id));
   if (ppTask) {
@@ -543,10 +608,12 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) {
 
     if (streamTaskShouldStop(&(*ppTask)->status)) {
       const char* pStatus = streamGetTaskStatusStr((*ppTask)->status.taskStatus);
-      qDebug("s-task:%s status:%s quit timer task", (*ppTask)->id.idStr, pStatus);
+
+      int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1);
+      stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d, ref:%d",
+              (*ppTask)->id.idStr, pStatus, (*ppTask)->hTaskInfo.retryTimes, ref);
 
       taosMemoryFree(pInfo);
-      atomic_sub_fetch_8(&(*ppTask)->status.timerActive, 1);
       taosWUnLockLatch(&pMeta->lock);
       return;
     }
@@ -555,18 +622,39 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) {
 
   SStreamTask* pTask = streamMetaAcquireTask(pMeta, pInfo->id.streamId, pInfo->id.taskId);
   if (pTask != NULL) {
+    SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo;
+
+    pHTaskInfo->tickCount -= 1;
+    if (pHTaskInfo->tickCount > 0) {
+      taosTmrReset(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamEnv.timer, &pHTaskInfo->pTimer);
+      streamMetaReleaseTask(pMeta, pTask);
+      return;
+    }
+
+    if (pHTaskInfo->retryTimes > MAX_RETRY_LAUNCH_HISTORY_TASK) {
+      int8_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+      streamMetaReleaseTask(pMeta, pTask);
+
+      stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x, ref:%d",
+              pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId, ref);
+
+      pHTaskInfo->id.taskId = 0;
+      pHTaskInfo->id.streamId = 0;
+    } else {  // not reach the limitation yet, let's continue retrying launch related fill-history task.
+      streamTaskSetRetryInfoForLaunch(pHTaskInfo);
       ASSERT(pTask->status.timerActive >= 1);
 
       // abort the timer if intend to stop task
-      SStreamTask* pHTask = streamMetaAcquireTask(pMeta, pTask->historyTaskId.streamId, pTask->historyTaskId.taskId);
+      SStreamTask* pHTask = streamMetaAcquireTask(pMeta, pHTaskInfo->id.streamId, pHTaskInfo->id.taskId);
       if (pHTask == NULL && (!streamTaskShouldStop(&pTask->status))) {
-        const char* pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
-        qWarn(
-            "s-task:%s vgId:%d status:%s failed to launch history task:0x%x, since it may not be built, or may have been "
-            "destroyed, or should stop",
-            pTask->id.idStr, pMeta->vgId, pStatus, (int32_t) pTask->historyTaskId.taskId);
+        const char* p = streamGetTaskStatusStr(pTask->status.taskStatus);
+        int32_t     hTaskId = pHTaskInfo->id.taskId;
+        stDebug("s-task:%s status:%s failed to launch fill-history task:0x%x, retry launch:%dms, retryCount:%d",
+                pTask->id.idStr, p, hTaskId, pHTaskInfo->waitInterval, pHTaskInfo->retryTimes);
 
-        taosTmrReset(tryLaunchHistoryTask, 100, pInfo, streamEnv.timer, &pTask->launchTaskTimer);
+        taosTmrReset(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamEnv.timer, &pHTaskInfo->pTimer);
         streamMetaReleaseTask(pMeta, pTask);
         return;
       }
@@ -577,61 +665,80 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) {
     }
 
     // not in timer anymore
-    atomic_sub_fetch_8(&pTask->status.timerActive, 1);
+    int8_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+    stDebug("s-task:0x%x fill-history task launch completed, retry times:%d, ref:%d", (int32_t)pInfo->id.taskId,
+            pHTaskInfo->retryTimes, ref);
     streamMetaReleaseTask(pMeta, pTask);
+    }
   } else {
-    qError("s-task:0x%x failed to load task, it may have been destroyed", (int32_t) pInfo->id.taskId);
+    stError("s-task:0x%x failed to load task, it may have been destroyed, not launch related fill-history task",
+            (int32_t)pInfo->id.taskId);
   }
 
   taosMemoryFree(pInfo);
 }
 
-// todo fix the bug: 2. race condition
+SLaunchHTaskInfo* createHTaskLaunchInfo(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
+  SLaunchHTaskInfo* pInfo = taosMemoryCalloc(1, sizeof(SLaunchHTaskInfo));
+  if (pInfo == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    return NULL;
+  }
+
+  pInfo->id.taskId = taskId;
+  pInfo->id.streamId = streamId;
+  pInfo->pMeta = pMeta;
+  return pInfo;
+}
+
 // an fill history task needs to be started.
 int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) {
   SStreamMeta* pMeta = pTask->pMeta;
-  int32_t hTaskId = pTask->historyTaskId.taskId;
+  int32_t      hTaskId = pTask->hTaskInfo.id.taskId;
   if (hTaskId == 0) {
     return TSDB_CODE_SUCCESS;
   }
 
   ASSERT(pTask->status.downstreamReady == 1);
-  qDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", pTask->id.idStr,
-         pTask->historyTaskId.streamId, hTaskId);
+  stDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", pTask->id.idStr,
+          pTask->hTaskInfo.id.streamId, hTaskId);
 
   // Set the execute conditions, including the query time window and the version range
-  SStreamTask** pHTask = taosHashGet(pMeta->pTasksMap, &pTask->historyTaskId, sizeof(pTask->historyTaskId));
+  SStreamTask** pHTask = taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
   if (pHTask == NULL) {
-    qWarn("s-task:%s vgId:%d failed to launch history task:0x%x, since it is not built yet", pTask->id.idStr,
-          pMeta->vgId, hTaskId);
+    stWarn("s-task:%s vgId:%d failed to launch history task:0x%x, since not built yet", pTask->id.idStr, pMeta->vgId,
+           hTaskId);
 
-    SStreamTaskRetryInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamTaskRetryInfo));
-    pInfo->id.taskId = pTask->id.taskId;
-    pInfo->id.streamId = pTask->id.streamId;
-    pInfo->pMeta = pTask->pMeta;
+    SLaunchHTaskInfo* pInfo = createHTaskLaunchInfo(pTask->pMeta, pTask->id.streamId, pTask->id.taskId);
+    if (pInfo == NULL) {
+      stError("s-task:%s failed to launch related fill-history task, since Out Of Memory", pTask->id.idStr);
+      return terrno;
+    }
 
-    if (pTask->launchTaskTimer == NULL) {
-      pTask->launchTaskTimer = taosTmrStart(tryLaunchHistoryTask, 100, pInfo, streamEnv.timer);
-      if (pTask->launchTaskTimer == NULL) {
-        // todo failed to create timer
+    streamTaskInitForLaunchHTask(&pTask->hTaskInfo);
+    if (pTask->hTaskInfo.pTimer == NULL) {
+      int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
+      pTask->hTaskInfo.pTimer = taosTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamEnv.timer);
+      if (pTask->hTaskInfo.pTimer == NULL) {  // todo failed to create timer
+        atomic_sub_fetch_32(&pTask->status.timerActive, 1);
+        stError("s-task:%s failed to start timer, related fill-history task not launched, ref:%d", pTask->id.idStr,
+                pTask->status.timerActive);
         taosMemoryFree(pInfo);
       } else {
-        int32_t ref = atomic_add_fetch_8(&pTask->status.timerActive, 1);  // timer is active
-        ASSERT(ref == 1);
-        qDebug("s-task:%s set timer active flag", pTask->id.idStr);
+        ASSERT(ref >= 1);
+        stDebug("s-task:%s set timer active flag, ref:%d", pTask->id.idStr, ref);
       }
     } else {  // timer exists
-      ASSERT(pTask->status.timerActive == 1);
-      qDebug("s-task:%s set timer active flag, task timer not null", pTask->id.idStr);
-      taosTmrReset(tryLaunchHistoryTask, 100, pInfo, streamEnv.timer, &pTask->launchTaskTimer);
+      ASSERT(pTask->status.timerActive >= 1);
+      stDebug("s-task:%s set timer active flag, task timer not null", pTask->id.idStr);
+      taosTmrReset(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamEnv.timer, &pTask->hTaskInfo.pTimer);
     }
 
-    // try again in 100ms
     return TSDB_CODE_SUCCESS;
   }
 
   if ((*pHTask)->status.downstreamReady == 1) {
-    qDebug("s-task:%s fill-history task is ready, no need to check downstream", (*pHTask)->id.idStr);
+    stDebug("s-task:%s fill-history task is ready, no need to check downstream", (*pHTask)->id.idStr);
   } else {
     checkFillhistoryTaskStatus(pTask, *pHTask);
  }
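tryLaunchHistoryTask now spaces real launch attempts with a tick countdown and gives up after MAX_RETRY_LAUNCH_HISTORY_TASK attempts, instead of retrying forever every 100ms. A sketch of that pacing policy follows; the limits here are illustrative, not the shipped constants.

```
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  int32_t tickCount;   // remaining timer ticks before the next attempt
  int32_t retryTimes;  // launch attempts made so far
} SLaunchRetrySketch;

enum { kMaxRetrySketch = 40, kTicksPerAttemptSketch = 3 };  // assumed limits

// called once per timer tick; returns true when the caller should try to
// launch now, and sets *pGiveUp once the retry budget is exhausted
static bool shouldAttemptLaunch(SLaunchRetrySketch* pRetry, bool* pGiveUp) {
  *pGiveUp = false;
  pRetry->tickCount -= 1;
  if (pRetry->tickCount > 0) return false;  // keep waiting

  if (pRetry->retryTimes > kMaxRetrySketch) {
    *pGiveUp = true;  // budget exhausted, stop the timer
    return false;
  }

  pRetry->retryTimes += 1;
  pRetry->tickCount = kTicksPerAttemptSketch;  // re-arm the countdown
  return true;
}
```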
@@ -675,7 +782,7 @@ bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer) {
   if (nextStartVer > latestVer - 1) {
     // no input data yet. no need to execute the secondardy scan while stream task halt
     streamTaskFillHistoryFinished(pTask);
-    qDebug(
+    stDebug(
         "s-task:%s no need to perform secondary scan-history data(step 2), since no data ingest during step1 scan, "
         "related stream task currentVer:%" PRId64,
         pTask->id.idStr, latestVer);

@@ -770,14 +877,14 @@ int32_t tDecodeStreamScanHistoryFinishReq(SDecoder* pDecoder, SStreamScanHistory
 }

 void streamTaskSetRangeStreamCalc(SStreamTask* pTask) {
-  if (pTask->historyTaskId.taskId == 0) {
+  if (pTask->hTaskInfo.id.taskId == 0) {
     SDataRange* pRange = &pTask->dataRange;
     if (pTask->info.fillHistory == 1) {
-      qDebug("s-task:%s fill-history task, time window:%" PRId64 "-%" PRId64 ", verRange:%" PRId64
+      stDebug("s-task:%s fill-history task, time window:%" PRId64 "-%" PRId64 ", verRange:%" PRId64
              "-%" PRId64,
              pTask->id.idStr, pRange->window.skey, pRange->window.ekey, pRange->range.minVer, pRange->range.maxVer);
     } else {
-      qDebug("s-task:%s no related fill-history task, stream time window:%" PRId64 "-%" PRId64 ", verRange:%" PRId64
+      stDebug("s-task:%s no related fill-history task, stream time window:%" PRId64 "-%" PRId64 ", verRange:%" PRId64
              "-%" PRId64,
              pTask->id.idStr, pRange->window.skey, pRange->window.ekey, pRange->range.minVer, pRange->range.maxVer);
     }

@@ -798,7 +905,7 @@ void streamTaskSetRangeStreamCalc(SStreamTask* pTask) {
     pRange->range.minVer = 0;
     pRange->range.maxVer = ver;

-    qDebug("s-task:%s level:%d related fill-history task exists, update stream calc time window:%" PRId64 " - %" PRId64
+    stDebug("s-task:%s level:%d related fill-history task exists, update stream calc time window:%" PRId64 " - %" PRId64
            ", verRang:%" PRId64 " - %" PRId64,
            pTask->id.idStr, pTask->info.taskLevel, pRange->window.skey, pRange->window.ekey, pRange->range.minVer,
            pRange->range.maxVer);

@@ -808,7 +915,7 @@ void streamTaskSetRangeStreamCalc(SStreamTask* pTask) {
 // only the downstream tasks are ready, set the task to be ready to work.
 void streamTaskCheckDownstream(SStreamTask* pTask) {
   if (pTask->info.fillHistory) {
-    qDebug("s-task:%s fill history task, wait for being launched", pTask->id.idStr);
+    stDebug("s-task:%s fill history task, wait for being launched", pTask->id.idStr);
     return;
   }

@@ -822,36 +929,42 @@ void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta) {

   int8_t status = pTask->status.taskStatus;
   if (status == TASK_STATUS__DROPPING) {
-    qDebug("vgId:%d s-task:%s task already dropped, do nothing", pMeta->vgId, pTask->id.idStr);
+    stDebug("vgId:%d s-task:%s task already dropped, do nothing", pMeta->vgId, pTask->id.idStr);
     return;
   }

   const char* str = streamGetTaskStatusStr(status);
   if (status == TASK_STATUS__STOP || status == TASK_STATUS__PAUSE) {
-    qDebug("vgId:%d s-task:%s task already stopped/paused, status:%s, do nothing", pMeta->vgId, pTask->id.idStr, str);
+    stDebug("vgId:%d s-task:%s task already stopped/paused, status:%s, do nothing", pMeta->vgId, pTask->id.idStr, str);
     return;
   }

   if(pTask->info.taskLevel == TASK_LEVEL__SINK) {
     int32_t num = atomic_add_fetch_32(&pMeta->numOfPausedTasks, 1);
-    qInfo("vgId:%d s-task:%s pause stream sink task. pause task num:%d", pMeta->vgId, pTask->id.idStr, num);
+    stInfo("vgId:%d s-task:%s pause stream sink task. pause task num:%d", pMeta->vgId, pTask->id.idStr, num);
     return;
   }

   while (!pTask->status.pauseAllowed || (pTask->status.taskStatus == TASK_STATUS__HALT)) {
     status = pTask->status.taskStatus;
     if (status == TASK_STATUS__DROPPING) {
-      qDebug("vgId:%d s-task:%s task already dropped, do nothing", pMeta->vgId, pTask->id.idStr);
+      stDebug("vgId:%d s-task:%s task already dropped, do nothing", pMeta->vgId, pTask->id.idStr);
       return;
     }

     if (status == TASK_STATUS__STOP || status == TASK_STATUS__PAUSE) {
-      qDebug("vgId:%d s-task:%s task already stopped/paused, status:%s, do nothing", pMeta->vgId, pTask->id.idStr, str);
+      stDebug("vgId:%d s-task:%s task already stopped/paused, status:%s, do nothing", pMeta->vgId, pTask->id.idStr, str);
       return;
     }

+    //
+    // if (pTask->status.downstreamReady == 0) {
+    //   ASSERT(pTask->execInfo.start == 0);
+    //   stDebug("s-task:%s in check downstream procedure, abort and paused", pTask->id.idStr);
+    //   break;
+    // }

     const char* pStatus = streamGetTaskStatusStr(status);
-    qDebug("s-task:%s wait for the task can be paused, status:%s, vgId:%d", pTask->id.idStr, pStatus, pMeta->vgId);
+    stDebug("s-task:%s wait for the task can be paused, status:%s, vgId:%d", pTask->id.idStr, pStatus, pMeta->vgId);
     taosMsleep(100);
   }
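The pause path above is a polling gate: the caller spins in 100 ms steps until the task either becomes pausable (`pauseAllowed`) or reaches a terminal state, in which case there is nothing left to pause. A minimal sketch of that gate under a simplified status enum; the `Demo*` names are stand-ins, not the real SStreamTask fields:

```c
#include <stdbool.h>
#include <unistd.h>

typedef enum { DEMO_NORMAL, DEMO_HALT, DEMO_DROPPING, DEMO_STOP, DEMO_PAUSE } DemoStatus;

typedef struct {
  volatile DemoStatus status;
  volatile bool       pauseAllowed;
} DemoTask;

// returns true when the pause flag was set, false when the task went away first
static bool demo_wait_and_pause(DemoTask* t) {
  while (!t->pauseAllowed || t->status == DEMO_HALT) {
    if (t->status == DEMO_DROPPING || t->status == DEMO_STOP) {
      return false;  // nothing left to pause
    }
    usleep(100 * 1000);  // recheck in 100ms, mirroring taosMsleep(100)
  }
  t->status = DEMO_PAUSE;
  return true;
}

int main(void) {
  DemoTask t = {DEMO_NORMAL, true};
  return demo_wait_and_pause(&t) ? 0 : 1;
}
```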
@@ -861,14 +974,14 @@ void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta) {
   status = pTask->status.taskStatus;
   if (status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP) {
     taosWUnLockLatch(&pMeta->lock);
-    qDebug("vgId:%d s-task:%s task already dropped/stopped/paused, do nothing", pMeta->vgId, pTask->id.idStr);
+    stDebug("vgId:%d s-task:%s task already dropped/stopped/paused, do nothing", pMeta->vgId, pTask->id.idStr);
     return;
   }

   atomic_store_8(&pTask->status.keepTaskStatus, pTask->status.taskStatus);
   atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__PAUSE);
   int32_t num = atomic_add_fetch_32(&pMeta->numOfPausedTasks, 1);
-  qInfo("vgId:%d s-task:%s pause stream task. pause task num:%d", pMeta->vgId, pTask->id.idStr, num);
+  stInfo("vgId:%d s-task:%s pause stream task. pause task num:%d", pMeta->vgId, pTask->id.idStr, num);
   taosWUnLockLatch(&pMeta->lock);

   // in case of fill-history task, stop the tsdb file scan operation.

@@ -878,7 +991,7 @@ void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta) {
   }

   int64_t el = taosGetTimestampMs() - st;
-  qDebug("vgId:%d s-task:%s set pause flag, prev:%s, pause elapsed time:%dms", pMeta->vgId, pTask->id.idStr,
+  stDebug("vgId:%d s-task:%s set pause flag, prev:%s, pause elapsed time:%dms", pMeta->vgId, pTask->id.idStr,
          streamGetTaskStatusStr(pTask->status.keepTaskStatus), (int32_t)el);
 }

@@ -888,12 +1001,12 @@ void streamTaskResume(SStreamTask* pTask, SStreamMeta* pMeta) {
     pTask->status.taskStatus = pTask->status.keepTaskStatus;
     pTask->status.keepTaskStatus = TASK_STATUS__NORMAL;
     int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1);
-    qInfo("vgId:%d s-task:%s resume from pause, status:%s. pause task num:%d", pMeta->vgId, pTask->id.idStr, streamGetTaskStatusStr(status), num);
+    stInfo("vgId:%d s-task:%s resume from pause, status:%s. pause task num:%d", pMeta->vgId, pTask->id.idStr, streamGetTaskStatusStr(status), num);
   } else if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
     int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1);
-    qInfo("vgId:%d s-task:%s sink task.resume from pause, status:%s. pause task num:%d", pMeta->vgId, pTask->id.idStr, streamGetTaskStatusStr(status), num);
+    stInfo("vgId:%d s-task:%s sink task.resume from pause, status:%s. pause task num:%d", pMeta->vgId, pTask->id.idStr, streamGetTaskStatusStr(status), num);
   } else {
-    qError("s-task:%s not in pause, failed to resume, status:%s", pTask->id.idStr, streamGetTaskStatusStr(status));
+    stError("s-task:%s not in pause, failed to resume, status:%s", pTask->id.idStr, streamGetTaskStatusStr(status));
   }
 }

@@ -902,57 +1015,28 @@ void streamTaskDisablePause(SStreamTask* pTask) {
   // pre-condition check
   const char* id = pTask->id.idStr;
   while (pTask->status.taskStatus == TASK_STATUS__PAUSE) {
-    qDebug("s-task:%s already in pause, wait for pause being cancelled, and set pause disabled, recheck in 100ms", id);
+    stDebug("s-task:%s already in pause, wait for pause being cancelled, and set pause disabled, recheck in 100ms", id);
     taosMsleep(100);
   }

-  qDebug("s-task:%s disable task pause", id);
+  stDebug("s-task:%s disable task pause", id);
   pTask->status.pauseAllowed = 0;
 }

 void streamTaskEnablePause(SStreamTask* pTask) {
-  qDebug("s-task:%s enable task pause", pTask->id.idStr);
+  stDebug("s-task:%s enable task pause", pTask->id.idStr);
   pTask->status.pauseAllowed = 1;
 }

-void streamTaskHalt(SStreamTask* pTask) {
-  int8_t status = pTask->status.taskStatus;
-  if (status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP) {
-    return;
-  }
-
-  if (status == TASK_STATUS__HALT) {
-    return;
-  }
-
-  // wait for checkpoint completed
-  while(pTask->status.taskStatus == TASK_STATUS__CK) {
-    qDebug("s-task:%s status:%s during generating checkpoint, wait for 1sec and retry set status:halt", pTask->id.idStr,
-           streamGetTaskStatusStr(TASK_STATUS__CK));
-    taosMsleep(1000);
-  }
-
-  // upgrade to halt status
-  if (status == TASK_STATUS__PAUSE) {
-    qDebug("s-task:%s upgrade status to %s from %s", pTask->id.idStr, streamGetTaskStatusStr(TASK_STATUS__HALT),
-           streamGetTaskStatusStr(TASK_STATUS__PAUSE));
-  } else {
-    qDebug("s-task:%s halt task", pTask->id.idStr);
-  }
-
-  pTask->status.keepTaskStatus = status;
-  pTask->status.taskStatus = TASK_STATUS__HALT;
-}
-
 void streamTaskResumeFromHalt(SStreamTask* pTask) {
   const char* id = pTask->id.idStr;
   int8_t status = pTask->status.taskStatus;
   if (status != TASK_STATUS__HALT) {
-    qError("s-task:%s not in halt status, status:%s", id, streamGetTaskStatusStr(status));
+    stError("s-task:%s not in halt status, status:%s", id, streamGetTaskStatusStr(status));
     return;
   }

   pTask->status.taskStatus = pTask->status.keepTaskStatus;
   pTask->status.keepTaskStatus = TASK_STATUS__NORMAL;
-  qDebug("s-task:%s resume from halt, current status:%s", id, streamGetTaskStatusStr(pTask->status.taskStatus));
+  stDebug("s-task:%s resume from halt, current status:%s", id, streamGetTaskStatusStr(pTask->status.taskStatus));
 }
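`streamTaskHalt` is deleted here, but the save/restore idiom it shared with `streamTaskResumeFromHalt` (and with pause/resume above) survives: the interrupted status is stashed in `keepTaskStatus` and put back afterwards. A toy sketch of that idiom, with illustrative types rather than the real status fields:

```c
#include <stdio.h>

typedef enum { ST_NORMAL, ST_PAUSE, ST_HALT } DemoStatus;

typedef struct {
  DemoStatus taskStatus;
  DemoStatus keepTaskStatus;
} DemoTaskState;

static void demo_halt(DemoTaskState* s) {
  s->keepTaskStatus = s->taskStatus;  // remember what we interrupt
  s->taskStatus = ST_HALT;
}

static void demo_resume_from_halt(DemoTaskState* s) {
  if (s->taskStatus != ST_HALT) return;  // mirrors the stError guard above
  s->taskStatus = s->keepTaskStatus;     // restore the interrupted status
  s->keepTaskStatus = ST_NORMAL;
}

int main(void) {
  DemoTaskState s = {ST_PAUSE, ST_NORMAL};
  demo_halt(&s);
  demo_resume_from_halt(&s);
  printf("status:%d\n", s.taskStatus);  // prints 1 (ST_PAUSE)
  return 0;
}
```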
@@ -18,6 +18,7 @@
 #include "rocksdb/c.h"
 #include "streamBackendRocksdb.h"
 #include "tcommon.h"
+#include "streamInt.h"

 enum SBackendFileType {
   ROCKSDB_OPTIONS_TYPE = 1,

@@ -122,10 +123,10 @@ int32_t streamSnapHandleInit(SStreamSnapHandle* pHandle, char* path, int64_t chk
            chkpId);
   if (taosIsDir(tdir)) {
     validChkp = 1;
-    qInfo("%s start to read snap %s", STREAM_STATE_TRANSFER, tdir);
+    stInfo("%s start to read snap %s", STREAM_STATE_TRANSFER, tdir);
     streamBackendAddInUseChkp(pMeta, chkpId);
   } else {
-    qWarn("%s failed to read from %s, reason: dir not exist,retry to default state dir", STREAM_STATE_TRANSFER, tdir);
+    stWarn("%s failed to read from %s, reason: dir not exist,retry to default state dir", STREAM_STATE_TRANSFER, tdir);
   }
 }

@@ -137,22 +138,22 @@ int32_t streamSnapHandleInit(SStreamSnapHandle* pHandle, char* path, int64_t chk
   taosMemoryFree(tdir);

   tdir = chkpdir;
-  qInfo("%s start to trigger checkpoint on %s", STREAM_STATE_TRANSFER, tdir);
+  stInfo("%s start to trigger checkpoint on %s", STREAM_STATE_TRANSFER, tdir);

   code = streamBackendTriggerChkp(pMeta, tdir);
   if (code != 0) {
-    qError("%s failed to trigger chekckpoint at %s", STREAM_STATE_TRANSFER, tdir);
+    stError("%s failed to trigger chekckpoint at %s", STREAM_STATE_TRANSFER, tdir);
     taosMemoryFree(tdir);
     return code;
   }
   chkpId = 0;
 }

-  qInfo("%s start to read dir: %s", STREAM_STATE_TRANSFER, tdir);
+  stInfo("%s start to read dir: %s", STREAM_STATE_TRANSFER, tdir);

 TdDirPtr pDir = taosOpenDir(tdir);
 if (NULL == pDir) {
-  qError("%s failed to open %s", STREAM_STATE_TRANSFER, tdir);
+  stError("%s failed to open %s", STREAM_STATE_TRANSFER, tdir);
   goto _err;
 }

@@ -202,14 +203,14 @@ int32_t streamSnapHandleInit(SStreamSnapHandle* pHandle, char* path, int64_t chk
   }
   sprintf(buf + strlen(buf) - 1, "]");

-  qInfo("%s get file list: %s", STREAM_STATE_TRANSFER, buf);
+  stInfo("%s get file list: %s", STREAM_STATE_TRANSFER, buf);
   taosMemoryFree(buf);
 }

 taosCloseDir(&pDir);

 if (pFile->pCurrent == NULL) {
-  qError("%s failed to open %s, reason: no valid file", STREAM_STATE_TRANSFER, tdir);
+  stError("%s failed to open %s, reason: no valid file", STREAM_STATE_TRANSFER, tdir);
   code = -1;
   tdir = NULL;
   goto _err;

@@ -333,24 +334,24 @@ int32_t streamSnapRead(SStreamSnapReader* pReader, uint8_t** ppData, int64_t* si
     return 0;
   } else {
     pHandle->fd = streamOpenFile(pFile->path, item->name, TD_FILE_READ);
-    qDebug("%s open file %s, current offset:%" PRId64 ", size:% " PRId64 ", file no.%d", STREAM_STATE_TRANSFER,
+    stDebug("%s open file %s, current offset:%" PRId64 ", size:% " PRId64 ", file no.%d", STREAM_STATE_TRANSFER,
            item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx);
   }
 }

-qDebug("%s start to read file %s, current offset:%" PRId64 ", size:%" PRId64 ", file no.%d", STREAM_STATE_TRANSFER,
+stDebug("%s start to read file %s, current offset:%" PRId64 ", size:%" PRId64 ", file no.%d", STREAM_STATE_TRANSFER,
        item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx);
 uint8_t* buf = taosMemoryCalloc(1, sizeof(SStreamSnapBlockHdr) + kBlockSize);
 int64_t nread = taosPReadFile(pHandle->fd, buf + sizeof(SStreamSnapBlockHdr), kBlockSize, pHandle->offset);
 if (nread == -1) {
   taosMemoryFree(buf);
   code = TAOS_SYSTEM_ERROR(terrno);
-  qError("%s snap failed to read snap, file name:%s, type:%d,reason:%s", STREAM_STATE_TRANSFER, item->name,
+  stError("%s snap failed to read snap, file name:%s, type:%d,reason:%s", STREAM_STATE_TRANSFER, item->name,
          item->type, tstrerror(code));
   return -1;
 } else if (nread > 0 && nread <= kBlockSize) {
   // left bytes less than kBlockSize
-  qDebug("%s read file %s, current offset:%" PRId64 ",size:% " PRId64 ", file no.%d", STREAM_STATE_TRANSFER,
+  stDebug("%s read file %s, current offset:%" PRId64 ",size:% " PRId64 ", file no.%d", STREAM_STATE_TRANSFER,
          item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx);
   pHandle->offset += nread;
   if (pHandle->offset >= item->size || nread < kBlockSize) {

@@ -359,7 +360,7 @@ int32_t streamSnapRead(SStreamSnapReader* pReader, uint8_t** ppData, int64_t* si
     pHandle->currFileIdx += 1;
   }
 } else {
-  qDebug("%s no data read, close file no.%d, move to next file, open and read", STREAM_STATE_TRANSFER,
+  stDebug("%s no data read, close file no.%d, move to next file, open and read", STREAM_STATE_TRANSFER,
          pHandle->currFileIdx);
   taosCloseFile(&pHandle->fd);
   pHandle->offset = 0;

@@ -377,7 +378,7 @@ int32_t streamSnapRead(SStreamSnapReader* pReader, uint8_t** ppData, int64_t* si
   nread = taosPReadFile(pHandle->fd, buf + sizeof(SStreamSnapBlockHdr), kBlockSize, pHandle->offset);
   pHandle->offset += nread;

-  qDebug("%s open file and read file %s, current offset:%" PRId64 ", size:% " PRId64 ", file no.%d",
+  stDebug("%s open file and read file %s, current offset:%" PRId64 ", size:% " PRId64 ", file no.%d",
         STREAM_STATE_TRANSFER, item->name, (int64_t)pHandle->offset, item->size, pHandle->currFileIdx);
 }
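The reader above streams a checkpoint as fixed-size blocks: each call reads at most `kBlockSize` bytes behind an `SStreamSnapBlockHdr`, advances an (offset, file-index) cursor, and rolls over to the next file when the current one is exhausted. A self-contained sketch of that cursor logic, using in-memory buffers instead of `taosPReadFile`; all `Demo*` names are hypothetical:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_BLOCK_SIZE 4096

typedef struct {
  const char* data;  // stands in for an open file handle
  int64_t     size;
} DemoFile;

typedef struct {
  DemoFile* files;
  int32_t   nFiles;
  int32_t   currFileIdx;
  int64_t   offset;
} DemoCursor;

// returns number of payload bytes copied into buf, 0 when all files are done
static int64_t demo_snap_read(DemoCursor* c, char* buf) {
  while (c->currFileIdx < c->nFiles) {
    DemoFile* f = &c->files[c->currFileIdx];
    int64_t   left = f->size - c->offset;
    if (left <= 0) {         // current file exhausted: move to the next one
      c->currFileIdx += 1;
      c->offset = 0;
      continue;
    }
    int64_t n = left < DEMO_BLOCK_SIZE ? left : DEMO_BLOCK_SIZE;
    memcpy(buf, f->data + c->offset, (size_t)n);
    c->offset += n;
    return n;
  }
  return 0;
}

int main(void) {
  DemoFile   files[] = {{"hello", 5}, {"world!", 6}};
  DemoCursor c = {files, 2, 0, 0};
  char       buf[DEMO_BLOCK_SIZE];
  int64_t    n;
  while ((n = demo_snap_read(&c, buf)) > 0) printf("read %lld bytes\n", (long long)n);
  return 0;
}
```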
@@ -433,7 +434,7 @@ int32_t streamSnapWrite(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t nDa
   pHandle->fd = streamOpenFile(pFile->path, pItem->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND);
   if (pHandle->fd == NULL) {
     code = TAOS_SYSTEM_ERROR(terrno);
-    qError("%s failed to open file name:%s%s%s, reason:%s", STREAM_STATE_TRANSFER, pFile->path, TD_DIRSEP, pHdr->name,
+    stError("%s failed to open file name:%s%s%s, reason:%s", STREAM_STATE_TRANSFER, pFile->path, TD_DIRSEP, pHdr->name,
            tstrerror(code));
   }
 }

@@ -442,7 +443,7 @@ int32_t streamSnapWrite(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t nDa
 int64_t bytes = taosPWriteFile(pHandle->fd, pHdr->data, pHdr->size, pHandle->offset);
 if (bytes != pHdr->size) {
   code = TAOS_SYSTEM_ERROR(terrno);
-  qError("%s failed to write snap, file name:%s, reason:%s", STREAM_STATE_TRANSFER, pHdr->name, tstrerror(code));
+  stError("%s failed to write snap, file name:%s, reason:%s", STREAM_STATE_TRANSFER, pHdr->name, tstrerror(code));
   return code;
 }
 pHandle->offset += bytes;

@@ -460,7 +461,7 @@ int32_t streamSnapWrite(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t nDa
 pHandle->fd = streamOpenFile(pFile->path, pItem->name, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND);
 if (pHandle->fd == NULL) {
   code = TAOS_SYSTEM_ERROR(terrno);
-  qError("%s failed to open file name:%s%s%s, reason:%s", STREAM_STATE_TRANSFER, pFile->path, TD_DIRSEP, pHdr->name,
+  stError("%s failed to open file name:%s%s%s, reason:%s", STREAM_STATE_TRANSFER, pFile->path, TD_DIRSEP, pHdr->name,
          tstrerror(code));
 }

@@ -484,7 +485,7 @@ int32_t streamSnapWriterClose(SStreamSnapWriter* pWriter, int8_t rollback) {
       n += sprintf(buf + n, "%s %" PRId64 "]", item->name, item->size);
     }
   }
-  qDebug("%s snap get file list, %s", STREAM_STATE_TRANSFER, buf);
+  stDebug("%s snap get file list, %s", STREAM_STATE_TRANSFER, buf);
   taosMemoryFree(buf);
 }

@@ -91,7 +91,7 @@ int stateKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
 }

 SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages) {
-  qDebug("open stream state, %s", path);
+  stDebug("open stream state, %s", path);
   SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState));
   if (pState == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;

@@ -139,7 +139,7 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz
   pState->pTdbState->backendCfWrapperId = id;
   pState->pTdbState->pBackendCfWrapper = taosAcquireRef(streamBackendCfWrapperId, id);
   // already exist stream task for
-  qInfo("already exist stream-state for %s", pState->pTdbState->idstr);
+  stInfo("already exist stream-state for %s", pState->pTdbState->idstr);
   // taosAcquireRef(streamBackendId, pState->streamBackendRid);
 }
 taosThreadMutexUnlock(&pMeta->backendMutex);

@@ -149,7 +149,7 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz
 _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT);

 pState->parNameMap = tSimpleHashInit(1024, hashFn);
-qInfo("succ to open state %p on backend %p 0x%" PRIx64 "-%d", pState, pMeta->streamBackend, pState->streamId,
+stInfo("succ to open state %p on backend %p 0x%" PRIx64 "-%d", pState, pMeta->streamBackend, pState->streamId,
      pState->taskId);
 return pState;

@@ -468,7 +468,7 @@ int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void*

 int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal) {
   // todo refactor
-  qDebug("streamStateReleaseBuf");
+  stDebug("streamStateReleaseBuf");
   if (!pVal) {
     return 0;
   }

@@ -717,7 +717,7 @@ void streamStateFreeCur(SStreamStateCur* pCur) {
   if (!pCur) {
     return;
   }
-  qDebug("streamStateFreeCur");
+  stDebug("streamStateFreeCur");
   rocksdb_iter_destroy(pCur->iter);
   if (pCur->snapshot) rocksdb_release_snapshot(pCur->db, pCur->snapshot);
   rocksdb_readoptions_destroy(pCur->readOpt);

@@ -736,7 +736,7 @@ void streamStateFreeVal(void* val) {

 int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen) {
 #ifdef USE_ROCKSDB
-  qDebug("===stream===save skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64, key->win.skey, key->win.ekey,
+  stDebug("===stream===save skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64, key->win.skey, key->win.ekey,
          key->groupId);
   return streamStateSessionPut_rocksdb(pState, key, value, vLen);
 #else

@@ -771,7 +771,7 @@ int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVa

 int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key) {
 #ifdef USE_ROCKSDB
-  qDebug("===stream===delete skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64, key->win.skey, key->win.ekey,
+  stDebug("===stream===delete skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64, key->win.skey, key->win.ekey,
          key->groupId);
   return streamStateSessionDel_rocksdb(pState, key);
 #else

@@ -1088,7 +1088,7 @@ _end:
 }

 int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]) {
-  qDebug("try to write to cf parname");
+  stDebug("try to write to cf parname");
 #ifdef USE_ROCKSDB
   if (tSimpleHashGetSize(pState->parNameMap) > MAX_TABLE_NAME_NUM) {
     if (tSimpleHashGet(pState->parNameMap, &groupId, sizeof(int64_t)) == NULL) {

@@ -96,8 +96,8 @@ int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
   if (tEncodeI64(pEncoder, pTask->chkInfo.checkpointVer) < 0) return -1;
   if (tEncodeI8(pEncoder, pTask->info.fillHistory) < 0) return -1;

-  if (tEncodeI64(pEncoder, pTask->historyTaskId.streamId)) return -1;
-  int32_t taskId = pTask->historyTaskId.taskId;
+  if (tEncodeI64(pEncoder, pTask->hTaskInfo.id.streamId)) return -1;
+  int32_t taskId = pTask->hTaskInfo.id.taskId;
   if (tEncodeI32(pEncoder, taskId)) return -1;

   if (tEncodeI64(pEncoder, pTask->streamTaskId.streamId)) return -1;

@@ -129,9 +129,9 @@ int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
 } else if (pTask->outputInfo.type == TASK_OUTPUT__FETCH) {
   if (tEncodeI8(pEncoder, pTask->fetchSink.reserved) < 0) return -1;
 } else if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
-  if (tEncodeI32(pEncoder, pTask->fixedEpDispatcher.taskId) < 0) return -1;
-  if (tEncodeI32(pEncoder, pTask->fixedEpDispatcher.nodeId) < 0) return -1;
-  if (tEncodeSEpSet(pEncoder, &pTask->fixedEpDispatcher.epSet) < 0) return -1;
+  if (tEncodeI32(pEncoder, pTask->fixedDispatcher.taskId) < 0) return -1;
+  if (tEncodeI32(pEncoder, pTask->fixedDispatcher.nodeId) < 0) return -1;
+  if (tEncodeSEpSet(pEncoder, &pTask->fixedDispatcher.epSet) < 0) return -1;
 } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
   if (tSerializeSUseDbRspImp(pEncoder, &pTask->shuffleDispatcher.dbInfo) < 0) return -1;
   if (tEncodeCStr(pEncoder, pTask->shuffleDispatcher.stbFullName) < 0) return -1;

@@ -169,9 +169,9 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
   if (tDecodeI64(pDecoder, &pTask->chkInfo.checkpointVer) < 0) return -1;
   if (tDecodeI8(pDecoder, &pTask->info.fillHistory) < 0) return -1;

-  if (tDecodeI64(pDecoder, &pTask->historyTaskId.streamId)) return -1;
+  if (tDecodeI64(pDecoder, &pTask->hTaskInfo.id.streamId)) return -1;
   if (tDecodeI32(pDecoder, &taskId)) return -1;
-  pTask->historyTaskId.taskId = taskId;
+  pTask->hTaskInfo.id.taskId = taskId;

   if (tDecodeI64(pDecoder, &pTask->streamTaskId.streamId)) return -1;
   if (tDecodeI32(pDecoder, &taskId)) return -1;

@@ -211,9 +211,9 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
 } else if (pTask->outputInfo.type == TASK_OUTPUT__FETCH) {
   if (tDecodeI8(pDecoder, &pTask->fetchSink.reserved) < 0) return -1;
 } else if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) {
-  if (tDecodeI32(pDecoder, &pTask->fixedEpDispatcher.taskId) < 0) return -1;
-  if (tDecodeI32(pDecoder, &pTask->fixedEpDispatcher.nodeId) < 0) return -1;
-  if (tDecodeSEpSet(pDecoder, &pTask->fixedEpDispatcher.epSet) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pTask->fixedDispatcher.taskId) < 0) return -1;
+  if (tDecodeI32(pDecoder, &pTask->fixedDispatcher.nodeId) < 0) return -1;
+  if (tDecodeSEpSet(pDecoder, &pTask->fixedDispatcher.epSet) < 0) return -1;
 } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
   if (tDeserializeSUseDbRspImp(pDecoder, &pTask->shuffleDispatcher.dbInfo) < 0) return -1;
   if (tDecodeCStrTo(pDecoder, pTask->shuffleDispatcher.stbFullName) < 0) return -1;
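Note that the `historyTaskId` → `hTaskInfo.id` and `fixedEpDispatcher` → `fixedDispatcher` renames touch the encoder and decoder in lockstep: serialization stays correct only while both sides visit the same fields in the same order. A toy codec illustrating that invariant (not the TDengine SEncoder/SDecoder API):

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct { int64_t streamId; int32_t taskId; } DemoTaskId;

static size_t demo_encode(const DemoTaskId* id, uint8_t* buf) {
  memcpy(buf, &id->streamId, sizeof(id->streamId));      // field 1: 8 bytes
  memcpy(buf + 8, &id->taskId, sizeof(id->taskId));      // field 2: 4 bytes
  return 12;
}

static size_t demo_decode(DemoTaskId* id, const uint8_t* buf) {
  memcpy(&id->streamId, buf, sizeof(id->streamId));      // same fields, same order
  memcpy(&id->taskId, buf + 8, sizeof(id->taskId));
  return 12;
}

int main(void) {
  DemoTaskId in = {42, 7}, out = {0, 0};
  uint8_t    buf[16];
  demo_encode(&in, buf);
  demo_decode(&out, buf);
  assert(out.streamId == 42 && out.taskId == 7);  // round-trips exactly
  return 0;
}
```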
@@ -289,24 +289,21 @@ static void freeUpstreamItem(void* p) {
 void tFreeStreamTask(SStreamTask* pTask) {
   int32_t taskId = pTask->id.taskId;

-  STaskExecStatisInfo* pStatis = &pTask->taskExecInfo;
+  STaskExecStatisInfo* pStatis = &pTask->execInfo;

-  qDebug("start to free s-task:0x%x, %p, state:%p, status:%s", taskId, pTask, pTask->pState,
+  stDebug("start to free s-task:0x%x, %p, state:%p, status:%s", taskId, pTask, pTask->pState,
          streamGetTaskStatusStr(pTask->status.taskStatus));

-  qDebug("s-task:0x%x exec info: create:%" PRId64 ", init:%" PRId64 ", start:%" PRId64
+  stDebug("s-task:0x%x task exec summary: create:%" PRId64 ", init:%" PRId64 ", start:%" PRId64
          ", updateCount:%d latestUpdate:%" PRId64 ", latestCheckPoint:%" PRId64 ", ver:%" PRId64
-         " nextProcessVer:%" PRId64,
+         " nextProcessVer:%" PRId64", checkpointCount:%d",
          taskId, pStatis->created, pStatis->init, pStatis->start, pStatis->updateCount, pStatis->latestUpdateTs,
-         pTask->chkInfo.checkpointId, pTask->chkInfo.checkpointVer, pTask->chkInfo.nextProcessVer);
-
-  if (pStatis->created == 0 || pStatis->init == 0 || pStatis->start == 0) {
-    int32_t k = 1;
-  }
+         pTask->chkInfo.checkpointId, pTask->chkInfo.checkpointVer, pTask->chkInfo.nextProcessVer,
+         pStatis->checkpoint);

   // remove the ref by timer
   while (pTask->status.timerActive > 0) {
-    qDebug("s-task:%s wait for task stop timer activities", pTask->id.idStr);
+    stDebug("s-task:%s wait for task stop timer activities", pTask->id.idStr);
     taosMsleep(10);
   }

@@ -315,9 +312,14 @@ void tFreeStreamTask(SStreamTask* pTask) {
   pTask->schedInfo.pTimer = NULL;
 }

-if (pTask->launchTaskTimer != NULL) {
-  taosTmrStop(pTask->launchTaskTimer);
-  pTask->launchTaskTimer = NULL;
+if (pTask->hTaskInfo.pTimer != NULL) {
+  taosTmrStop(pTask->hTaskInfo.pTimer);
+  pTask->hTaskInfo.pTimer = NULL;
+}
+
+if (pTask->msgInfo.pTimer != NULL) {
+  taosTmrStop(pTask->msgInfo.pTimer);
+  pTask->msgInfo.pTimer = NULL;
 }

 int32_t status = atomic_load_8((int8_t*)&(pTask->status.taskStatus));

@@ -342,6 +344,13 @@ void tFreeStreamTask(SStreamTask* pTask) {
   walCloseReader(pTask->exec.pWalReader);
 }

+pTask->pReadyMsgList = taosArrayDestroy(pTask->pReadyMsgList);
+if (pTask->msgInfo.pData != NULL) {
+  destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
+  pTask->msgInfo.pData = NULL;
+  pTask->msgInfo.dispatchMsgType = 0;
+}
+
 if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
   tDeleteSchemaWrapper(pTask->tbSink.pSchemaWrapper);
   taosMemoryFree(pTask->tbSink.pTSchema);

@@ -352,16 +361,10 @@ void tFreeStreamTask(SStreamTask* pTask) {
 }

 if (pTask->pState) {
-  qDebug("s-task:0x%x start to free task state", taskId);
+  stDebug("s-task:0x%x start to free task state", taskId);
   streamStateClose(pTask->pState, status == TASK_STATUS__DROPPING);
 }

-pTask->pReadyMsgList = taosArrayDestroy(pTask->pReadyMsgList);
-if (pTask->msgInfo.pData != NULL) {
-  destroyStreamDataBlock(pTask->msgInfo.pData);
-  pTask->msgInfo.pData = NULL;
-}
-
 if (pTask->id.idStr != NULL) {
   taosMemoryFree((void*)pTask->id.idStr);
 }

@@ -380,11 +383,12 @@ void tFreeStreamTask(SStreamTask* pTask) {
   pTask->pUpstreamInfoList = NULL;
 }

+pTask->msgInfo.pRetryList = taosArrayDestroy(pTask->msgInfo.pRetryList);
 taosMemoryFree(pTask->pTokenBucket);
 taosThreadMutexDestroy(&pTask->lock);
 taosMemoryFree(pTask);

-qDebug("s-task:0x%x free task completed", taskId);
+stDebug("s-task:0x%x free task completed", taskId);
 }

 int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver) {

@@ -396,11 +400,11 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i
 pTask->outputInfo.queue = streamQueueOpen(512 << 10);

 if (pTask->inputInfo.queue == NULL || pTask->outputInfo.queue == NULL) {
-  qError("s-task:%s failed to prepare the input/output queue, initialize task failed", pTask->id.idStr);
+  stError("s-task:%s failed to prepare the input/output queue, initialize task failed", pTask->id.idStr);
   return TSDB_CODE_OUT_OF_MEMORY;
 }

-pTask->taskExecInfo.created = taosGetTimestampMs();
+pTask->execInfo.created = taosGetTimestampMs();
 pTask->inputInfo.status = TASK_INPUT_STATUS__NORMAL;
 pTask->outputInfo.status = TASK_OUTPUT_STATUS__NORMAL;
 pTask->pMeta = pMeta;

@@ -409,25 +413,28 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i
 pTask->dataRange.range.maxVer = ver;
 pTask->dataRange.range.minVer = ver;
 pTask->pMsgCb = pMsgCb;
+pTask->msgInfo.pRetryList = taosArrayInit(4, sizeof(int32_t));

 pTask->pTokenBucket = taosMemoryCalloc(1, sizeof(STokenBucket));
 if (pTask->pTokenBucket == NULL) {
-  qError("s-task:%s failed to prepare the tokenBucket, code:%s", pTask->id.idStr, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
+  stError("s-task:%s failed to prepare the tokenBucket, code:%s", pTask->id.idStr, tstrerror(TSDB_CODE_OUT_OF_MEMORY));
   return TSDB_CODE_OUT_OF_MEMORY;
 }

-streamTaskInitTokenBucket(pTask->pTokenBucket, 50, 50);
+// 2MiB per second for sink task
+// 50 times sink operator per second
+streamTaskInitTokenBucket(pTask->pTokenBucket, 50, 50, 2);

 TdThreadMutexAttr attr = {0};
 int code = taosThreadMutexAttrInit(&attr);
 if (code != 0) {
-  qError("s-task:%s initElapsed mutex attr failed, code:%s", pTask->id.idStr, tstrerror(code));
+  stError("s-task:%s initElapsed mutex attr failed, code:%s", pTask->id.idStr, tstrerror(code));
   return code;
 }

 code = taosThreadMutexAttrSetType(&attr, PTHREAD_MUTEX_RECURSIVE);
 if (code != 0) {
-  qError("s-task:%s set mutex attr recursive, code:%s", pTask->id.idStr, tstrerror(code));
+  stError("s-task:%s set mutex attr recursive, code:%s", pTask->id.idStr, tstrerror(code));
   return code;
 }
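The extra argument to `streamTaskInitTokenBucket` adds a byte quota on top of the operation-count bucket; per the new comments, sink tasks get 50 operations and 2 MiB per second. A generic token-bucket sketch along those lines; the field names and refill math below are assumptions, since the real STokenBucket layout is internal to the stream module:

```c
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

typedef struct {
  int32_t capacity;     // burst size in op tokens
  int32_t ratePerSec;   // op tokens added per second
  double  tokens;       // currently available op tokens
  double  bytesPerSec;  // byte quota, e.g. 2 MiB/s for sink tasks
  double  bytesAvail;
  int64_t lastRefillMs;
} DemoTokenBucket;

static int64_t demo_now_ms(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static void demo_bucket_init(DemoTokenBucket* b, int32_t cap, int32_t rate, double mibPerSec) {
  b->capacity = cap;
  b->ratePerSec = rate;
  b->tokens = cap;
  b->bytesPerSec = mibPerSec * 1024 * 1024;
  b->bytesAvail = b->bytesPerSec;
  b->lastRefillMs = demo_now_ms();
}

// returns true when one operation of nBytes may proceed, false when throttled
static bool demo_bucket_take(DemoTokenBucket* b, int64_t nBytes) {
  int64_t now = demo_now_ms();
  double  elapsedSec = (now - b->lastRefillMs) / 1000.0;
  b->tokens += elapsedSec * b->ratePerSec;                    // refill op tokens
  if (b->tokens > b->capacity) b->tokens = b->capacity;
  b->bytesAvail += elapsedSec * b->bytesPerSec;               // refill byte quota
  if (b->bytesAvail > b->bytesPerSec) b->bytesAvail = b->bytesPerSec;
  b->lastRefillMs = now;

  if (b->tokens < 1 || b->bytesAvail < (double)nBytes) return false;
  b->tokens -= 1;
  b->bytesAvail -= (double)nBytes;
  return true;
}

int main(void) {
  DemoTokenBucket b;
  demo_bucket_init(&b, 50, 50, 2.0);
  return demo_bucket_take(&b, 64 * 1024) ? 0 : 1;  // one op writing 64 KiB
}
```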
@@ -490,7 +497,7 @@ void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpS
 SStreamChildEpInfo* pInfo = taosArrayGetP(pTask->pUpstreamInfoList, i);
 if (pInfo->nodeId == nodeId) {
   epsetAssign(&pInfo->epSet, pEpSet);
-  qDebug("s-task:0x%x update the upstreamInfo taskId:0x%x(nodeId:%d) newEpset:%s", pTask->id.taskId,
+  stDebug("s-task:0x%x update the upstreamInfo taskId:0x%x(nodeId:%d) newEpset:%s", pTask->id.taskId,
          pInfo->taskId, nodeId, buf);
   break;
 }

@@ -498,7 +505,7 @@ void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpS
 }

 void streamTaskSetFixedDownstreamInfo(SStreamTask* pTask, const SStreamTask* pDownstreamTask) {
-  STaskDispatcherFixedEp* pDispatcher = &pTask->fixedEpDispatcher;
+  STaskDispatcherFixed* pDispatcher = &pTask->fixedDispatcher;
   pDispatcher->taskId = pDownstreamTask->id.taskId;
   pDispatcher->nodeId = pDownstreamTask->info.nodeId;
   pDispatcher->epSet = pDownstreamTask->info.epSet;

@@ -521,16 +528,16 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE

   if (pVgInfo->vgId == nodeId) {
     epsetAssign(&pVgInfo->epSet, pEpSet);
-    qDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpset:%s", pTask->id.taskId,
+    stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpset:%s", pTask->id.taskId,
           pVgInfo->taskId, nodeId, buf);
     break;
   }
 }
 } else if (type == TASK_OUTPUT__FIXED_DISPATCH) {
-  STaskDispatcherFixedEp* pDispatcher = &pTask->fixedEpDispatcher;
+  STaskDispatcherFixed* pDispatcher = &pTask->fixedDispatcher;
   if (pDispatcher->nodeId == nodeId) {
     epsetAssign(&pDispatcher->epSet, pEpSet);
-    qDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpSet:%s", pTask->id.taskId,
+    stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpSet:%s", pTask->id.taskId,
           pDispatcher->taskId, nodeId, buf);
   }
 } else {

@@ -539,20 +546,26 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE
 }

 int32_t streamTaskStop(SStreamTask* pTask) {
-  SStreamMeta* pMeta = pTask->pMeta;
+  int32_t vgId = pTask->pMeta->vgId;
   int64_t st = taosGetTimestampMs();
   const char* id = pTask->id.idStr;

+  taosThreadMutexLock(&pTask->lock);
+  if (pTask->status.taskStatus == TASK_STATUS__CK) {
+    stDebug("s-task:%s in checkpoint will be discarded since task is stopped", id);
+  }
   pTask->status.taskStatus = TASK_STATUS__STOP;
-  qKillTask(pTask->exec.pExecutor, TSDB_CODE_SUCCESS);
+  taosThreadMutexUnlock(&pTask->lock);

+  qKillTask(pTask->exec.pExecutor, TSDB_CODE_SUCCESS);
   while (/*pTask->status.schedStatus != TASK_SCHED_STATUS__INACTIVE */ !streamTaskIsIdle(pTask)) {
-    qDebug("s-task:%s level:%d wait for task to be idle, check again in 100ms", id, pTask->info.taskLevel);
+    stDebug("s-task:%s level:%d wait for task to be idle and then close, check again in 100ms", id,
+            pTask->info.taskLevel);
     taosMsleep(100);
   }

   int64_t el = taosGetTimestampMs() - st;
-  qDebug("vgId:%d s-task:%s is closed in %" PRId64 " ms", pMeta->vgId, pTask->id.idStr, el);
+  stDebug("vgId:%d s-task:%s is closed in %" PRId64 " ms", vgId, id, el);
   return 0;
 }
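`streamTaskStop` now takes the task mutex around the status flip, so a checkpoint in progress observes a consistent transition instead of racing with the stop. A minimal sketch of that locked transition with pthreads directly; TDengine wraps these calls in its `taosThread*` layer, and the names below are illustrative:

```c
#include <pthread.h>
#include <stdio.h>

typedef enum { D_NORMAL, D_CK, D_STOP } DStatus;

typedef struct {
  pthread_mutex_t lock;
  DStatus         status;
} DTask;

static void d_task_stop(DTask* t) {
  pthread_mutex_lock(&t->lock);
  if (t->status == D_CK) {
    printf("checkpoint in progress will be discarded\n");
  }
  t->status = D_STOP;  // the transition is atomic with respect to the lock
  pthread_mutex_unlock(&t->lock);
}

int main(void) {
  DTask t = {PTHREAD_MUTEX_INITIALIZER, D_CK};
  d_task_stop(&t);
  return 0;
}
```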
@@ -562,7 +575,7 @@ int32_t doUpdateTaskEpset(SStreamTask* pTask, int32_t nodeId, SEpSet* pEpSet) {
 if (pTask->info.nodeId == nodeId) { // execution task should be moved away
   epsetAssign(&pTask->info.epSet, pEpSet);
   EPSET_TO_STR(pEpSet, buf)
-  qDebug("s-task:0x%x (vgId:%d) self node epset is updated %s", pTask->id.taskId, nodeId, buf);
+  stDebug("s-task:0x%x (vgId:%d) self node epset is updated %s", pTask->id.taskId, nodeId, buf);
 }

 // check for the dispath info and the upstream task info

@@ -580,14 +593,14 @@ int32_t doUpdateTaskEpset(SStreamTask* pTask, int32_t nodeId, SEpSet* pEpSet) {
 }

 int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList) {
-  STaskExecStatisInfo* p = &pTask->taskExecInfo;
+  STaskExecStatisInfo* p = &pTask->execInfo;

   int32_t numOfNodes = taosArrayGetSize(pNodeList);
   int64_t prevTs = p->latestUpdateTs;

   p->latestUpdateTs = taosGetTimestampMs();
   p->updateCount += 1;
-  qDebug("s-task:%s update task nodeEp epset, updatedNodes:%d, updateCount:%d, prevTs:%" PRId64, pTask->id.idStr,
+  stDebug("s-task:%s update task nodeEp epset, updatedNodes:%d, updateCount:%d, prevTs:%" PRId64, pTask->id.idStr,
          numOfNodes, p->updateCount, prevTs);

   for (int32_t i = 0; i < taosArrayGetSize(pNodeList); ++i) {

@@ -608,7 +621,7 @@ void streamTaskResetUpstreamStageInfo(SStreamTask* pTask) {
   pInfo->stage = -1;
 }

-qDebug("s-task:%s reset all upstream tasks stage info", pTask->id.idStr);
+stDebug("s-task:%s reset all upstream tasks stage info", pTask->id.idStr);
 }

 int8_t streamTaskSetSchedStatusWait(SStreamTask* pTask) {

@@ -658,15 +671,43 @@ int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskI
 SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_DROP, .pCont = pReq, .contLen = sizeof(SVDropStreamTaskReq)};
 int32_t code = tmsgPutToQueue(pMsgCb, WRITE_QUEUE, &msg);
 if (code != TSDB_CODE_SUCCESS) {
-  qError("vgId:%d failed to send drop task:0x%x msg, code:%s", vgId, pTaskId->taskId, tstrerror(code));
+  stError("vgId:%d failed to send drop task:0x%x msg, code:%s", vgId, pTaskId->taskId, tstrerror(code));
   return code;
 }

-qDebug("vgId:%d build and send drop table:0x%x msg", vgId, pTaskId->taskId);
+stDebug("vgId:%d build and send drop table:0x%x msg", vgId, pTaskId->taskId);
 return code;
 }

-STaskId extractStreamTaskKey(const SStreamTask* pTask) {
+STaskId streamTaskExtractKey(const SStreamTask* pTask) {
   STaskId id = {.streamId = pTask->id.streamId, .taskId = pTask->id.taskId};
   return id;
 }

+void streamTaskInitForLaunchHTask(SHistoryTaskInfo* pInfo) {
+  pInfo->waitInterval = LAUNCH_HTASK_INTERVAL;
+  pInfo->tickCount = ceil(LAUNCH_HTASK_INTERVAL / WAIT_FOR_MINIMAL_INTERVAL);
+  pInfo->retryTimes = 0;
+}
+
+void streamTaskSetRetryInfoForLaunch(SHistoryTaskInfo* pInfo) {
+  ASSERT(pInfo->tickCount == 0);
+
+  pInfo->waitInterval *= RETRY_LAUNCH_INTERVAL_INC_RATE;
+  pInfo->tickCount = ceil(pInfo->waitInterval / WAIT_FOR_MINIMAL_INTERVAL);
+  pInfo->retryTimes += 1;
+}
+
+const char* streamGetTaskStatusStr(int32_t status) {
+  switch(status) {
+    case TASK_STATUS__NORMAL: return "normal";
+    case TASK_STATUS__SCAN_HISTORY: return "scan-history";
+    case TASK_STATUS__HALT: return "halt";
+    case TASK_STATUS__PAUSE: return "paused";
+    case TASK_STATUS__CK: return "check-point";
+    case TASK_STATUS__DROPPING: return "dropping";
+    case TASK_STATUS__STOP: return "stop";
+    case TASK_STATUS__UNINIT: return "uninitialized";
+    default:return "";
+  }
+}
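The two new helpers convert a wait interval into timer ticks: the launch timer always fires every `WAIT_FOR_MINIMAL_INTERVAL` ms, `tickCount` says how many firings to skip before retrying, and each failed launch multiplies `waitInterval` by `RETRY_LAUNCH_INTERVAL_INC_RATE`, stretching the retry period without ever rearming a longer timer. A worked example of that arithmetic; the three constant values below are assumptions for illustration, not the TDengine definitions:

```c
#include <math.h>
#include <stdio.h>

#define WAIT_FOR_MINIMAL_INTERVAL      100.0  // ms, assumed
#define LAUNCH_HTASK_INTERVAL          100.0  // ms, assumed
#define RETRY_LAUNCH_INTERVAL_INC_RATE 1.2    // assumed growth factor

int main(void) {
  double waitInterval = LAUNCH_HTASK_INTERVAL;
  int    tickCount = (int)ceil(LAUNCH_HTASK_INTERVAL / WAIT_FOR_MINIMAL_INTERVAL);
  printf("initial: wait %.0f ms => %d tick(s)\n", waitInterval, tickCount);
  for (int retry = 1; retry <= 5; ++retry) {
    waitInterval *= RETRY_LAUNCH_INTERVAL_INC_RATE;             // geometric growth
    tickCount = (int)ceil(waitInterval / WAIT_FOR_MINIMAL_INTERVAL);
    printf("retry %d: wait %.0f ms => %d tick(s)\n", retry, waitInterval, tickCount);
  }
  return 0;  // build with -lm for ceil()
}
```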
@ -576,6 +576,7 @@ void* destroyConnPool(SCliThrd* pThrd) {
|
||||||
connList = taosHashIterate((SHashObj*)pool, connList);
|
connList = taosHashIterate((SHashObj*)pool, connList);
|
||||||
}
|
}
|
||||||
taosHashCleanup(pool);
|
taosHashCleanup(pool);
|
||||||
|
pThrd->pool = NULL;
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -870,9 +871,11 @@ static void cliDestroyConn(SCliConn* conn, bool clear) {
|
||||||
connList->list->numOfConn--;
|
connList->list->numOfConn--;
|
||||||
connList->size--;
|
connList->size--;
|
||||||
} else {
|
} else {
|
||||||
|
if (pThrd->pool) {
|
||||||
SConnList* connList = taosHashGet((SHashObj*)pThrd->pool, conn->dstAddr, strlen(conn->dstAddr) + 1);
|
SConnList* connList = taosHashGet((SHashObj*)pThrd->pool, conn->dstAddr, strlen(conn->dstAddr) + 1);
|
||||||
if (connList != NULL) connList->list->numOfConn--;
|
if (connList != NULL) connList->list->numOfConn--;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
conn->list = NULL;
|
conn->list = NULL;
|
||||||
pThrd->newConnCount--;
|
pThrd->newConnCount--;
|
||||||
|
|
||||||
|
|
|
@ -1299,6 +1299,9 @@ int transSendResponse(const STransMsg* msg) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
SExHandle* exh = msg->info.handle;
|
SExHandle* exh = msg->info.handle;
|
||||||
|
if (exh == NULL) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
int64_t refId = msg->info.refId;
|
int64_t refId = msg->info.refId;
|
||||||
ASYNC_CHECK_HANDLE(exh, refId);
|
ASYNC_CHECK_HANDLE(exh, refId);
|
||||||
|
|
||||||
|
|
|
@@ -798,13 +798,12 @@ void taosGetProcIODelta(int64_t *rchars, int64_t *wchars, int64_t *read_bytes, i
 }

 int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) {
-#ifdef WINDOWS
   *receive_bytes = 0;
   *transmit_bytes = 0;
+
+#ifdef WINDOWS
   return 0;
 #elif defined(_TD_DARWIN_64)
-  *receive_bytes = 0;
-  *transmit_bytes = 0;
   return 0;
 #else
   TdFilePtr pFile = taosOpenFile(tsSysNetFile, TD_FILE_READ | TD_FILE_STREAM);

@@ -841,8 +840,8 @@ int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) {
            "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64
            " %" PRId64,
            nouse0, &o_rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &o_tbytes, &tpackets);
-    *receive_bytes = o_rbytes;
-    *transmit_bytes = o_tbytes;
+    *receive_bytes += o_rbytes;
+    *transmit_bytes += o_tbytes;
   }

   taosCloseFile(&pFile);

@@ -854,8 +853,8 @@ int32_t taosGetCardInfo(int64_t *receive_bytes, int64_t *transmit_bytes) {
 void taosGetCardInfoDelta(int64_t *receive_bytes, int64_t *transmit_bytes) {
   static int64_t last_receive_bytes = 0;
   static int64_t last_transmit_bytes = 0;
-  static int64_t cur_receive_bytes = 0;
-  static int64_t cur_transmit_bytes = 0;
+  int64_t cur_receive_bytes = 0;
+  int64_t cur_transmit_bytes = 0;
   if (taosGetCardInfo(&cur_receive_bytes, &cur_transmit_bytes) == 0) {
     *receive_bytes = cur_receive_bytes - last_receive_bytes;
     *transmit_bytes = cur_transmit_bytes - last_transmit_bytes;
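Taken together, the three hunks above fix how network counters are sampled: `taosGetCardInfo` now zeroes its outputs once up front and accumulates with `+=` across every interface line of the proc net file, and `taosGetCardInfoDelta` drops `static` from the `cur_*` totals so each sample starts from zero instead of compounding across calls; only the `last_*` totals need to persist. A minimal Python sketch of the same pattern (illustrative only, not TDengine code; the interface data is a stand-in):

```python
# Illustrative sketch of the sampling pattern above, not TDengine code.
_last = {"rx": 0, "tx": 0}   # only the previous totals persist (the static last_* vars)

def get_card_info(interfaces):
    # Fresh totals on every call (the former static cur_* vars are now locals),
    # summed across all interfaces (the `=` -> `+=` change).
    rx = tx = 0
    for iface in interfaces:
        rx += iface["rx_bytes"]
        tx += iface["tx_bytes"]
    return rx, tx

def get_card_info_delta(interfaces):
    # Bytes transferred since the previous sample.
    rx, tx = get_card_info(interfaces)
    d_rx, d_tx = rx - _last["rx"], tx - _last["tx"]
    _last["rx"], _last["tx"] = rx, tx
    return d_rx, d_tx
```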
@@ -100,6 +100,7 @@ int32_t tmrDebugFlag = 131;
 int32_t uDebugFlag = 131;
 int32_t rpcDebugFlag = 131;
 int32_t qDebugFlag = 131;
+int32_t stDebugFlag = 131;
 int32_t wDebugFlag = 131;
 int32_t sDebugFlag = 131;
 int32_t tsdbDebugFlag = 131;
@@ -0,0 +1,86 @@
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import tdDnodes
+from math import inf
+
+class TDTestCase:
+    def caseDescription(self):
+        '''
+        case1<shenglian zhou>: [TS-4088] timestamp range support operator
+        '''
+        return
+
+    def init(self, conn, logSql, replicaVer=1):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), True)
+        self._conn = conn
+
+    def restartTaosd(self, index=1, dbname="db"):
+        tdDnodes.stop(index)
+        tdDnodes.startWithoutSleep(index)
+        tdSql.execute(f"use ts_range")
+
+    def run(self):
+        print("running {}".format(__file__))
+        tdSql.execute("drop database if exists ts_range")
+        tdSql.execute("create database if not exists ts_range")
+        tdSql.execute('use ts_range')
+        tdSql.execute('create table stb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);')
+
+        tdSql.execute("create table tb1 using stb1 tags(1,'1',1.0);")
+
+        tdSql.execute("create table tb2 using stb1 tags(2,'2',2.0);")
+
+        tdSql.execute("create table tb3 using stb1 tags(3,'3',3.0);")
+
+        tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"123","1234",1,1,1,1);')
+
+        tdSql.execute("insert into tb1 values ('2021-11-11 09:00:01',true,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);")
+
+        tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:02\',true,2,NULL,2,NULL,2,NULL,"234",NULL,2,NULL,2,NULL);')
+
+        tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:03\',false,NULL,3,NULL,3,NULL,3,NULL,"3456",NULL,3,NULL,3);')
+
+        tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:04\',true,4,4,4,4,4,4,"456","4567",4,4,4,4);')
+
+        tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:05\',true,127,32767,2147483647,9223372036854775807,3.402823466e+38,1.79769e+308,"567","5678",254,65534,4294967294,9223372036854775807);')
+
+        tdSql.execute('insert into tb1 values (\'2021-11-11 09:00:06\',true,-127,-32767,-2147483647,-9223372036854775807,-3.402823466e+38,-1.79769e+308,"678","6789",0,0,0,0);')
+
+        tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:00\',true,1,1,1,1,1,1,"111","1111",1,1,1,1);')
+
+        tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:01\',true,2,2,2,2,2,2,"222","2222",2,2,2,2);')
+
+        tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:02\',true,3,3,2,3,3,3,"333","3333",3,3,3,3);')
+
+        tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:03\',false,4,4,4,4,4,4,"444","4444",4,4,4,4);')
+
+        tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:04\',true,5,5,5,5,5,5,"555","5555",5,5,5,5);')
+
+        tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:05\',true,6,6,6,6,6,6,"666","6666",6,6,6,6);')
+
+        tdSql.execute('insert into tb2 values (\'2021-11-11 09:00:06\',true,7,7,7,7,7,7,"777","7777",7,7,7,7);')
+
+
+        tdSql.query('select count(*) from stb1 where ts < 1000000000000 + 10s')
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 0)
+        tdSql.query('select count(*) from stb1 where ts >= 1000000000000 + 10s')
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 14)
+
+        tdSql.query('select count(*) from stb1 where ts > 1000000000000 - 10s and ts <= 1000000000000 + 10s')
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 0)
+
+        tdSql.query('select count(*) from stb1 where ts > 1636592400000 + 3s');
+        tdSql.checkData(0, 0, 6)
+        #tdSql.execute('drop database ts_range')
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
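The new case covers [TS-4088]: a timestamp literal in a WHERE clause can now be combined with a duration literal, e.g. `1000000000000 + 10s` or `1636592400000 + 3s`. A hedged sketch of the same check outside the test framework, assuming a running local server and the `taos` Python connector, with `ts_range` populated as in the test above:

```python
# Sketch only: assumes a local TDengine instance and the `taos` connector.
import taos

conn = taos.connect()
cursor = conn.cursor()
cursor.execute("use ts_range")
# Epoch-millisecond literal plus a duration literal -- the TS-4088 operator support.
cursor.execute("select count(*) from stb1 where ts > 1636592400000 + 3s")
print(cursor.fetchall())   # expected [(6,)] given the rows inserted above
```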
@@ -49,6 +49,10 @@
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_26.py -Q 4
 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interval_limit_opt.py -Q 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interval_limit_opt_2.py -Q 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interval_limit_opt_2.py -Q 3
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interval_limit_opt_2.py -Q 2
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interval_limit_opt_2.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqShow.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropStb.py
 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb0.py
@@ -990,6 +994,7 @@
 ,,y,script,./test.sh -f tsim/query/udf_with_const.sim
 ,,y,script,./test.sh -f tsim/query/join_interval.sim
 ,,y,script,./test.sh -f tsim/query/join_pk.sim
+,,y,script,./test.sh -f tsim/query/count_spread.sim
 ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim
 ,,y,script,./test.sh -f tsim/query/multi_order_by.sim
 ,,y,script,./test.sh -f tsim/query/sys_tbname.sim
@@ -1005,6 +1010,7 @@
 ,,y,script,./test.sh -f tsim/query/nullColSma.sim
 ,,y,script,./test.sh -f tsim/query/bug3398.sim
 ,,y,script,./test.sh -f tsim/query/explain_tsorder.sim
+,,y,script,./test.sh -f tsim/query/apercentile.sim
 ,,y,script,./test.sh -f tsim/qnode/basic1.sim
 ,,y,script,./test.sh -f tsim/snode/basic1.sim
 ,,y,script,./test.sh -f tsim/mnode/basic1.sim
@@ -1240,6 +1246,7 @@

 #develop test
 ,,n,develop-test,python3 ./test.py -f 2-query/table_count_scan.py
+,,n,develop-test,python3 ./test.py -f 2-query/ts-range.py
 ,,n,develop-test,python3 ./test.py -f 2-query/show_create_db.py
 ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/auto_create_table_json.py
 ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/custom_col_tag.py
@@ -0,0 +1,20 @@
+#! /bin/bash
+
+set -x
+
+cd $1
+git reset --hard HEAD
+git checkout -- .
+git checkout $2
+git pull
+
+sed -i ':a;N;$!ba;s/\(.*\)OFF/\1ON/' $1/cmake/cmake.options
+
+mkdir -p $1/debug
+rm -rf $1/debug/*
+cd $1/debug
+cmake .. -DBUILD_TOOLS=true
+cd $1/debug
+make -j 4
+cd $1/debug
+make install
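One line in the new build script deserves a gloss: the `:a;N;$!ba` prefix makes sed read the whole of `cmake.options` into its pattern space, so the greedy `s/\(.*\)OFF/\1ON/` rewrites only the final `OFF` in the file. A rough Python equivalent, shown purely to clarify the one-liner:

```python
# Rough Python equivalent of: sed -i ':a;N;$!ba;s/\(.*\)OFF/\1ON/' FILE
# Greedy (.*) with DOTALL consumes everything up to the last OFF,
# so only that final occurrence becomes ON.
import re

def flip_last_off(path):
    with open(path) as f:
        text = f.read()
    text = re.sub(r"(.*)OFF", r"\1ON", text, count=1, flags=re.S)
    with open(path, "w") as f:
        f.write(text)
```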
@@ -0,0 +1,36 @@
+import os
+import subprocess
+
+class BuildTDengine:
+    def __init__(self, host='vm96', path = '/root/pxiao/TDengine', branch = 'main') -> None:
+        self.host = host
+        self.path = path
+        self.branch = branch
+
+    def build(self):
+        parameters=[self.path, self.branch]
+        build_fild = "./build.sh"
+        try:
+            # Run the Bash script using subprocess
+            subprocess.run(['bash', build_fild] + parameters, check=True)
+            print("TDengine build successfully.")
+        except subprocess.CalledProcessError as e:
+            print(f"Error running Bash script: {e}")
+        except FileNotFoundError as e:
+            print(f"File not found: {e}")
+
+    def get_commit_id(self):
+        cmd = f"cd {self.path} && git rev-parse --short @ "
+        try:
+            # Run the Bash command and capture the output
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True, text=True)
+
+            # Access the output from the 'result' object
+            output = result.stdout
+
+            return output.strip()
+        except subprocess.CalledProcessError as e:
+            print(f"Error running Bash command: {e}")
+
+bd = BuildTDengine()
+print(bd.get_commit_id())
@@ -0,0 +1,100 @@
+import datetime
+import json
+
+class InsertJson:
+    def __init__(self, tables = 10000, records_per_table = 10000, interlace_rows = 0, stt_trigger = 1) -> None:
+        self.tables = tables
+        self.records_per_table = records_per_table
+        self.interlace_rows = interlace_rows
+        self.stt_trigger = stt_trigger
+
+    def get_db_cfg(self) -> dict:
+        return {
+            "name": "test",
+            "drop": "true",
+            "replica": 1,
+            "precision": "ms",
+            "cachemodel": "'both'",
+            "keep": 3650,
+            "minRows": 100,
+            "maxRows": 4096,
+            "comp": 2,
+            "vgroups": 10,
+            "stt_trigger": self.stt_trigger
+        }
+
+    def get_stb_cfg(self) -> list:
+        return [
+            {
+                "name": "meters",
+                "child_table_exists": "no",
+                "childtable_count": self.tables,
+                "childtable_prefix": "d",
+                "escape_character": "yes",
+                "auto_create_table": "no",
+                "batch_create_tbl_num": 5,
+                "data_source": "rand",
+                "insert_mode": "taosc",
+                "non_stop_mode": "no",
+                "line_protocol": "line",
+                "insert_rows": self.records_per_table,
+                "childtable_limit": 10000,
+                "childtable_offset": 100,
+                "interlace_rows": self.interlace_rows,
+                "insert_interval": 0,
+                "partial_col_num": 0,
+                "disorder_ratio": 0,
+                "disorder_range": 1000,
+                "timestamp_step": 10,
+                "start_timestamp": "2022-10-01 00:00:00.000",
+                "sample_format": "csv",
+                "sample_file": "./sample.csv",
+                "use_sample_ts": "no",
+                "tags_file": "",
+                "columns": self.get_column_list(),
+                "tags": self.get_tag_list()
+            }
+        ]
+
+    def get_column_list(self) -> list:
+        return [
+            {"type": "FLOAT", "name": "current", "count": 1, "max": 12, "min": 8},
+            {"type": "INT", "name": "voltage", "max": 225, "min": 215},
+            {"type": "FLOAT", "name": "phase", "max": 1, "min": 0},
+        ]
+
+    def get_tag_list(self) -> list:
+        return [
+            { "type": "TINYINT", "name": "groupid", "max": 10, "min": 1 },
+            { "name": "location", "type": "BINARY", "len": 16, "values": ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View", "Sunnyvale", "Santa Clara", "Cupertino"]}
+        ]
+
+    def get_insert_cfg(self) -> dict:
+        return {
+            "filetype": "insert",
+            "cfgdir": "/etc/taos",
+            "host": "127.0.0.1",
+            "port": 6030,
+            "user": "root",
+            "password": "taosdata",
+            "thread_count": 10,
+            "create_table_thread_count": 7,
+            "result_file": "/tmp/insert_res.txt",
+            "confirm_parameter_prompt": "no",
+            "insert_interval": 0,
+            "num_of_records_per_req": 1000,
+            "max_sql_len": 1024000,
+            "databases": [{
+                "dbinfo": self.get_db_cfg(),
+                "super_tables": self.get_stb_cfg()
+            }]
+        }
+
+    def create_insert_file(self) -> str:
+        date = datetime.datetime.now()
+        file_create_table = f"/tmp/insert_{date:%F-%H%M}.json"
+
+        with open(file_create_table, 'w') as f:
+            json.dump(self.get_insert_cfg(), f)
+
+        return file_create_table
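The class only renders a taosBenchmark insert-configuration file; the caller is expected to feed the resulting path to `taosBenchmark -f`, which the driver script later in this diff does. A minimal usage sketch (parameter values are illustrative; the `insert_json` module name matches the import in that driver script):

```python
# Sketch: render an insert config and hand it to taosBenchmark (assumed on PATH).
import os
import insert_json

insert = insert_json.InsertJson(tables=1000, records_per_table=1000,
                                interlace_rows=0, stt_trigger=1)
cfg = insert.create_insert_file()     # e.g. /tmp/insert_<date>.json
os.system(f"taosBenchmark -f {cfg}")
```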
@@ -0,0 +1,60 @@
+import mysql.connector
+
+class MySQLDatabase:
+    def __init__(self, host = '192.168.1.116', port = 3306, user = 'root', password = 'taosdata', database = 'perf_data'):
+        self.host = host
+        self.port = port
+        self.user = user
+        self.password = password
+        self.database = database
+        self.connection = None
+
+    def connect(self):
+        try:
+            self.connection = mysql.connector.connect(
+                host=self.host,
+                port=self.port,
+                user=self.user,
+                password=self.password,
+                database=self.database
+            )
+        except mysql.connector.Error as error:
+            print("Failed to connect to database: {}".format(error))
+
+    def execute(self, query, params=None):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute(query, params)
+            self.connection.commit()
+        except mysql.connector.Error as error:
+            print("Failed to execute query: {}".format(error))
+        finally:
+            cursor.close()
+
+    def query(self, query, params=None):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute(query, params)
+            result = cursor.fetchall()
+
+            return result
+        except mysql.connector.Error as error:
+            print("Failed to execute query: {}".format(error))
+        finally:
+            cursor.close()
+
+    def get_id(self, query, params = None):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute(query, params)
+            cursor.execute("select last_insert_id()")
+            id = cursor.fetchone()[0]
+            self.connection.commit()
+
+            return id
+        except mysql.connector.Error as error:
+            print("Failed to execute query: {}".format(error))
+        finally:
+            cursor.close()
+
+    def disconnect(self):
+        self.connection.close()
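A short usage sketch for the helper above; host, credentials and the `perf_data` schema are the defaults hard-coded in `__init__`, and a `scenarios` table is assumed to already exist:

```python
# Sketch: register one benchmark scenario and read it back.
import mysqldb   # the module defined above, as imported by the driver script below

db = mysqldb.MySQLDatabase()   # defaults: 192.168.1.116:3306 / perf_data
db.connect()
new_id = db.get_id(
    "insert into scenarios(num_of_tables, records_per_table, interlace_rows, stt_trigger) "
    "values(%s, %s, %s, %s)", (10000, 10000, 0, 1))
print(new_id, db.query("select * from scenarios where id = %s", (new_id,)))
db.disconnect()
```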
@@ -0,0 +1,41 @@
+import datetime
+import json
+
+class QueryJson:
+    def __init__(self, sql, query_times = 1) -> None:
+        self.sql = sql
+        self.query_times = query_times
+
+    def gen_query_json(self) -> dict:
+        return {
+            "filetype": "query",
+            "cfgdir": "/etc/taos",
+            "host": "127.0.0.1",
+            "port": 6030,
+            "user": "root",
+            "password": "taosdata",
+            "confirm_parameter_prompt": "no",
+            "databases": "test",
+            "query_times": self.query_times,
+            "query_mode": "taosc",
+            "specified_table_query": {
+                "query_interval": 1,
+                "concurrent": 1,
+                "sqls": [
+                    {
+                        "sql": "%s" % self.sql,
+                        "result": "./query_res.txt"
+                    }
+                ]
+            }
+
+        }
+
+    def create_query_file(self) -> str:
+        date = datetime.datetime.now()
+        file_create_table = f"/tmp/query_{date:%F-%H%M}.json"
+
+        with open(file_create_table, 'w') as f:
+            json.dump(self.gen_query_json(), f)
+
+        return file_create_table
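As with the insert side, this class only generates taosBenchmark's query-test JSON; per the config above, each statement's output lands in `./query_res.txt`. A brief usage sketch (the SQL text is illustrative):

```python
# Sketch: build a query config and run it through taosBenchmark.
import os
import query_json   # the module defined above

q = query_json.QueryJson("select count(*) from test.meters", query_times=10)
os.system(f"taosBenchmark -f {q.create_query_file()}")
```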
@@ -0,0 +1,29 @@
+import os
+import mysqldb
+import insert_json
+import query_json
+
+
+if __name__ == "__main__":
+    num_of_tables = 10000
+    records_per_table = 10000
+    interlace_rows = 0
+    stt_trigger = 1
+
+    db = mysqldb.MySQLDatabase()
+    db.connect()
+    sql = f"select id from scenarios where num_of_tables = {num_of_tables} and records_per_table = {records_per_table} and interlace_rows = {interlace_rows} and stt_trigger = {stt_trigger}"
+    row = db.query(sql)
+    if row is None:
+        id = db.get_id(f"insert into scenarios(num_of_tables, records_per_table, interlace_rows, stt_trigger) values({num_of_tables},{records_per_table}, {interlace_rows}, {stt_trigger})")
+    else:
+        id = row[0][0]
+
+    print(id)
+
+    db.disconnect()
+
+    insert = insert_json.InsertJson(num_of_tables, records_per_table, interlace_rows, stt_trigger)
+    os.system(f"taosBenchmark -f {insert.create_insert_file()}")
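The driver implements a get-or-create lookup on `scenarios`: reuse the id when the parameter combination is already registered, insert it otherwise, then generate the config and launch taosBenchmark. One detail a reader may want to verify: `MySQLDatabase.query` returns `[]` (not `None`) when no row matches, so a truthiness test is the safer guard. A hedged sketch of that variant:

```python
# Sketch: get-or-create that also covers an empty result set, since
# cursor.fetchall() yields [] rather than None when nothing matches.
def get_or_create_scenario(db, select_sql, insert_sql):
    row = db.query(select_sql)
    if not row:                # [] (no match) or None (query failed)
        return db.get_id(insert_sql)
    return row[0][0]
```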
@@ -78,7 +78,7 @@ class TDSql:
         self.cursor.execute(s)
         time.sleep(2)

-    def error(self, sql, expectedErrno = None):
+    def error(self, sql, expectedErrno = None, expectErrInfo = None):
         caller = inspect.getframeinfo(inspect.stack()[1][0])
         expectErrNotOccured = True

@@ -87,12 +87,9 @@ class TDSql:
         except BaseException as e:
             expectErrNotOccured = False
             self.errno = e.errno
-            self.error_info = repr(e)
-            # print(error_info)
-            # self.error_info = error_info[error_info.index('(')+1:-1].split(",")[0].replace("'","")
+            error_info = repr(e)
+            self.error_info = error_info[error_info.index('(')+1:-1].split(",")[0].replace("'","")
             # self.error_info = (','.join(error_info.split(",")[:-1]).split("(",1)[1:][0]).replace("'","")
-            # print("!!!!!!!!!!!!!!",self.error_info)

         if expectErrNotOccured:
             tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql))
         else:

@@ -108,8 +105,15 @@ class TDSql:
         else:
             tdLog.info("sql:%s, expect error occured" % (sql))

-        return self.error_info
+        if expectErrInfo != None:
+            if expectErrInfo == self.error_info:
+                tdLog.info("sql:%s, expected expectErrInfo %s occured" % (sql, expectErrInfo))
+            else:
+                tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected errno %s" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
+        else:
+            tdLog.info("sql:%s, expect error occured" % (sql))
+
+        return self.error_info

     def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None):
         self.sql = sql
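With the new `expectErrInfo` parameter, `error()` can assert on the normalized server error message as well as the errno; `repr(e)` is trimmed down to the bare message before the comparison. A usage sketch mirroring the calls added later in this diff:

```python
# Sketch: negative tests that pin the expected error message.
# The statement must fail AND report exactly this message.
tdSql.error("use db2;", expectErrInfo="Database not exist")
tdSql.error("create index idx1 on db2.stb (t1);",
            expectErrInfo="Database not exist")
```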
@@ -0,0 +1,36 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+sql drop database if exists test2;
+sql create database test2;
+sql use test2;
+sql create table s(ts timestamp,v double) tags(id nchar(16));
+sql create table t using s tags('11') ;
+sql insert into t values(now,null);
+sql select APERCENTILE(v,50,'t-digest') as k from s where ts > now-1d and ts < now interval(1h);
+if $rows != 1 then
+  return -1
+endi
+if $data00 != NULL then
+  return -1
+endi
+
+sql select APERCENTILE(v,50) as k from s where ts > now-1d and ts < now interval(1h);
+if $rows != 1 then
+  return -1
+endi
+if $data00 != NULL then
+  return -1
+endi
+
+sql select APERCENTILE(v,50) as k from s where ts > now-1d and ts < now interval(1h);
+if $rows != 1 then
+  return -1
+endi
+if $data00 != NULL then
+  return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -0,0 +1,24 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+sql create database test;
+sql use test;
+sql create table st(ts timestamp, f int) tags(t int);
+sql insert into ct1 using st tags(1) values(now, 0)(now+1s, 1)(now+2s, 10)(now+3s, 11)
+sql insert into ct2 using st tags(2) values(now+2s, 2)(now+3s, 3)
+sql insert into ct3 using st tags(3) values(now+4s, 4)(now+5s, 5)
+sql insert into ct4 using st tags(4) values(now+6s, 6)(now+7s, 7)
+
+sql select count(*), spread(ts) from st where tbname='ct1'
+print $data00, $data01
+if $data00 != @4@ then
+  return -1
+endi
+if $data01 != @3000.000000000@ then
+  return -1
+endi
+
+sql drop database test;
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -68,10 +68,10 @@ print =============connect with root, revoke read from u1, all from u2
 sql connect
 sql revoke read on u1_d1.* from u1
 sql revoke all on u2_d1.* from u2
-sleep 1000

 print =============connect with u1
 sql connect u1
+sql reset query cache
 sql insert into u1_d1.t1 values(now, 1)
 sql_error select * from u1_d1.t1;

@@ -85,9 +85,9 @@ sql connect
 sql grant read on u1_d1.* to u1
 sql grant all on u2_d1.* to u2

-sleep 1000
 print =============connect with u1
 sql connect u1
+sql reset query cache
 sql select * from u1_d1.t1;
 sql insert into u1_d1.t1 values(now, 2)
@@ -22,7 +22,7 @@ class TDTestCase:
     def init(self, conn, logSql, replicaVar=1):
         self.replicaVar = int(replicaVar)
         tdLog.debug("start to execute %s" % __file__)
-        tdSql.init(conn.cursor())
+        tdSql.init(conn.cursor(), True)
         self.setsql = TDSetSql()
         self.dbname = 'db'
         self.stbname = 'stb'
@@ -217,7 +217,7 @@ class TDTestCase:
         tdSql.checkEqual(20470,len(tdSql.queryResult))

         tdSql.query("select * from information_schema.ins_columns where db_name ='information_schema'")
-        tdSql.checkEqual(193, len(tdSql.queryResult))
+        tdSql.checkEqual(194, len(tdSql.queryResult))

         tdSql.query("select * from information_schema.ins_columns where db_name ='performance_schema'")
         tdSql.checkEqual(54, len(tdSql.queryResult))
@@ -180,6 +180,13 @@ class TDTestCase:
         tdSql.error(f'show indexes from db.ctb1 from db')
         tdSql.error(f'show indexes from `db`.`ctb1` from db')

+        # check error information
+        tdSql.error(f'create index idx1 on db2.stb (t1);', expectErrInfo='Database not exist')
+        tdSql.error(f'use db2;', expectErrInfo='Database not exist')
+        tdSql.error(f' alter stable db2.stb add column c2 int;', expectErrInfo='Database not exist')
+
+
+
     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
@@ -55,7 +55,7 @@ class TDTestCase:
         tdSql.checkData(0, 2, 0)

         tdSql.query("show dnode 1 variables like '%debugFlag'")
-        tdSql.checkRows(21)
+        tdSql.checkRows(22)

         tdSql.query("show dnode 1 variables like '____debugFlag'")
         tdSql.checkRows(2)
@@ -174,61 +174,6 @@ class TDTestCase:
         for offset in range(0, 1000, 500):
             self.test_interval_limit_asc(offset)
             self.test_interval_limit_desc(offset)
-            self.test_interval_fill_limit(offset)
-            self.test_interval_order_by_limit(offset)
-            self.test_interval_partition_by_slimit(offset)
-
-    def test_interval_fill_limit(self, offset: int = 0):
-        sqls = [
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1s) fill(linear)",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1m) fill(linear)",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1h) fill(linear)",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1d) fill(linear)"
-        ]
-        for sql in sqls:
-            self.query_and_check_with_limit(sql, 5000, 1000, offset)
-
-    def test_interval_order_by_limit(self, offset: int = 0):
-        sqls = [
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by b",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), last(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by count(*), sum(c1), a",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a, count(*), sum(c1)",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by b",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc",
-            "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by count(*), sum(c1), a",
-            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
-                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a, count(*), sum(c1)",
-        ]
-        for sql in sqls:
-            self.query_and_check_with_limit(sql, 6000, 2000, offset)
-
-    def test_interval_partition_by_slimit(self, offset: int = 0):
-        sqls = [
-            "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters "
-            "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1m)",
-            "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters "
-            "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1h)",
-            "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters "
-            "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by c3 interval(1m)",
-        ]
-        for sql in sqls:
-            self.query_and_check_with_slimit(sql, 10, 2, offset)
-
     def test_interval_partition_by_slimit_limit(self):
         sql = "select * from (select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts),c3 from meters " \
@@ -0,0 +1,222 @@
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+import math
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.common import *
+# from tmqCommon import *
+
+class TDTestCase:
+    def __init__(self):
+        self.vgroups = 4
+        self.ctbNum = 10
+        self.rowsPerTbl = 10000
+        self.duraion = '1h'
+
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to excute {__file__}")
+        tdSql.init(conn.cursor(), False)
+
+    def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'):
+        if dropFlag == 1:
+            tsql.execute("drop database if exists %s"%(dbName))
+
+        tsql.execute("create database if not exists %s vgroups %d replica %d duration %s"%(dbName, vgroups, replica, duration))
+        tdLog.debug("complete to create database %s"%(dbName))
+        return
+
+    def create_stable(self,tsql, paraDict):
+        colString = tdCom.gen_column_type_str(colname_prefix=paraDict["colPrefix"], column_elm_list=paraDict["colSchema"])
+        tagString = tdCom.gen_tag_type_str(tagname_prefix=paraDict["tagPrefix"], tag_elm_list=paraDict["tagSchema"])
+        sqlString = f"create table if not exists %s.%s (%s) tags (%s)"%(paraDict["dbName"], paraDict["stbName"], colString, tagString)
+        tdLog.debug("%s"%(sqlString))
+        tsql.execute(sqlString)
+        return
+
+    def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1,ctbStartIdx=0):
+        for i in range(ctbNum):
+            sqlString = "create table %s.%s%d using %s.%s tags(%d, 'tb%d', 'tb%d', %d, %d, %d)" % \
+                    (dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,(i+ctbStartIdx) % 5,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx)
+            tsql.execute(sqlString)
+
+        tdLog.debug("complete to create %d child tables by %s.%s" %(ctbNum, dbName, stbName))
+        return
+
+    def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs,tsStep):
+        tdLog.debug("start to insert data ............")
+        tsql.execute("use %s" %dbName)
+        pre_insert = "insert into "
+        sql = pre_insert
+
+        for i in range(ctbNum):
+            rowsBatched = 0
+            sql += " %s%d values "%(ctbPrefix,i)
+            for j in range(rowsPerTbl):
+                if (i < ctbNum/2):
+                    sql += "(%d, %d, %d, %d,%d,%d,%d,true,'binary%d', 'nchar%d') "%(startTs + j*tsStep, j%10, j%10, j%10, j%10, j%10, j%10, j%10, j%10)
+                else:
+                    sql += "(%d, %d, NULL, %d,NULL,%d,%d,true,'binary%d', 'nchar%d') "%(startTs + j*tsStep, j%10, j%10, j%10, j%10, j%10, j%10)
+                rowsBatched += 1
+                if ((rowsBatched == batchNum) or (j == rowsPerTbl - 1)):
+                    tsql.execute(sql)
+                    rowsBatched = 0
+                    if j < rowsPerTbl - 1:
+                        sql = "insert into %s%d values " %(ctbPrefix,i)
+                    else:
+                        sql = "insert into "
+        if sql != pre_insert:
+            tsql.execute(sql)
+        tdLog.debug("insert data ............ [OK]")
+        return
+
+    def prepareTestEnv(self):
+        tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
+        paraDict = {'dbName': 'test',
+                    'dropFlag': 1,
+                    'vgroups': 2,
+                    'stbName': 'meters',
+                    'colPrefix': 'c',
+                    'tagPrefix': 't',
+                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'FLOAT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'smallint', 'count':1},{'type': 'tinyint', 'count':1},{'type': 'bool', 'count':1},{'type': 'binary', 'len':10, 'count':1},{'type': 'nchar', 'len':10, 'count':1}],
+                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'nchar', 'len':20, 'count':1},{'type': 'binary', 'len':20, 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'smallint', 'count':1},{'type': 'DOUBLE', 'count':1}],
+                    'ctbPrefix': 't',
+                    'ctbStartIdx': 0,
+                    'ctbNum': 100,
+                    'rowsPerTbl': 10000,
+                    'batchNum': 3000,
+                    'startTs': 1537146000000,
+                    'tsStep': 600000}
+
+        paraDict['vgroups'] = self.vgroups
+        paraDict['ctbNum'] = self.ctbNum
+        paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+        tdLog.info("create database")
+        self.create_database(tsql=tdSql, dbName=paraDict["dbName"], dropFlag=paraDict["dropFlag"], vgroups=paraDict["vgroups"], replica=self.replicaVar, duration=self.duraion)
+
+        tdLog.info("create stb")
+        self.create_stable(tsql=tdSql, paraDict=paraDict)
+
+        tdLog.info("create child tables")
+        self.create_ctable(tsql=tdSql, dbName=paraDict["dbName"], \
+                stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],\
+                ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict["ctbStartIdx"])
+        self.insert_data(tsql=tdSql, dbName=paraDict["dbName"],\
+                ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],\
+                rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],\
+                startTs=paraDict["startTs"],tsStep=paraDict["tsStep"])
+        return
+
+    def check_first_rows(self, all_rows, limited_rows, offset: int = 0):
+        for i in range(0, len(limited_rows) - 1):
+            if limited_rows[i] != all_rows[i + offset]:
+                tdLog.info("row: %d, row in all: %s" % (i+offset+1, str(all_rows[i+offset])))
+                tdLog.info("row: %d, row in limted: %s" % (i+1, str(limited_rows[i])))
+                tdLog.exit("row data check failed")
+        tdLog.info("all rows are the same as query without limit..")
+
+    def query_and_check_with_slimit(self, sql: str, max_limit: int, step: int, offset: int = 0):
+        self.query_and_check_with_limit(sql, max_limit, step, offset, ' slimit ')
+
+    def query_and_check_with_limit(self, sql: str, max_limit: int, step: int, offset: int = 0, limit_str: str = ' limit '):
+        for limit in range(0, max_limit, step):
+            limited_sql = sql + limit_str + str(offset) + "," + str(limit)
+            tdLog.info("query with sql: %s " % (sql) + limit_str + " %d,%d" % (offset, limit))
+            all_rows = tdSql.getResult(sql)
+            limited_rows = tdSql.getResult(limited_sql)
+            tdLog.info("all rows: %d, limited rows: %d" % (len(all_rows), len(limited_rows)))
+            if limit_str == ' limit ':
+                if limit + offset <= len(all_rows) and len(limited_rows) != limit:
+                    tdLog.exit("limited sql has less rows than limit value which is not right, \
+                            limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset))
+                elif limit + offset > len(all_rows) and offset < len(all_rows) and offset + len(limited_rows) != len(all_rows):
+                    tdLog.exit("limited sql has less rows than all_rows which is not right, \
+                            limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset))
+                elif offset >= len(all_rows) and len(limited_rows) != 0:
+                    tdLog.exit("limited rows should be zero, \
+                            limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset))
+
+            self.check_first_rows(all_rows, limited_rows, offset)
+
+    def test_interval_limit_offset(self):
+        for offset in range(0, 1000, 500):
+            self.test_interval_fill_limit(offset)
+            self.test_interval_order_by_limit(offset)
+            self.test_interval_partition_by_slimit(offset)
+
+    def test_interval_fill_limit(self, offset: int = 0):
+        sqls = [
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1s) fill(linear)",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1m) fill(linear)",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1h) fill(linear)",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1d) fill(linear)"
+        ]
+        for sql in sqls:
+            self.query_and_check_with_limit(sql, 5000, 1000, offset)
+
+    def test_interval_order_by_limit(self, offset: int = 0):
+        sqls = [
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by b",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), last(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by count(*), sum(c1), a",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a, count(*), sum(c1)",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by b",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc",
+            "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by count(*), sum(c1), a",
+            "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \
+                where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a, count(*), sum(c1)",
+        ]
+        for sql in sqls:
+            self.query_and_check_with_limit(sql, 6000, 2000, offset)
+
+    def test_interval_partition_by_slimit(self, offset: int = 0):
+        sqls = [
+            "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters "
+            "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1m)",
+            "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters "
+            "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1h)",
+            "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters "
+            "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by c3 interval(1m)",
+        ]
+        for sql in sqls:
+            self.query_and_check_with_slimit(sql, 10, 2, offset)
+
+    def test_group_by_operator(self):
+        tdSql.query('select count(*), c1+1 from meters group by tbname, c1+1', 1)
+
+    def run(self):
+        self.prepareTestEnv()
+        self.test_group_by_operator()
+        self.test_interval_limit_offset()
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
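The validation strategy in this new test is worth spelling out: each statement runs twice, once bare and once with ` limit <offset>,<n>` (or ` slimit`) appended, and `check_first_rows` then requires the limited result to be a contiguous slice of the full result starting at the offset. A condensed sketch of the invariant, assuming any `fetch(sql)` helper that returns rows as a list:

```python
# Sketch of the LIMIT/OFFSET invariant enforced above:
#   rows(sql + " limit off,n") == rows(sql)[off : off + n]
def check_limit(fetch, sql, off, n):
    all_rows = fetch(sql)
    limited = fetch(f"{sql} limit {off},{n}")
    assert limited == all_rows[off:off + n], "limit window must be a slice of the full result"
```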