diff --git a/cmake/cmake.version b/cmake/cmake.version
index 0e35fa316f..4abc854e71 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.2.3.0.alpha")
+ SET(TD_VER_NUMBER "3.2.4.0.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/docs/en/08-client-libraries/07-python.mdx b/docs/en/08-client-libraries/07-python.mdx
index aacfd0fe53..3110afcf10 100644
--- a/docs/en/08-client-libraries/07-python.mdx
+++ b/docs/en/08-client-libraries/07-python.mdx
@@ -842,12 +842,12 @@ consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
In addition to native connections, the client library also supports subscriptions via websockets.
-The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../develop/tmq/#create-a-consumer).
+The syntax for creating a consumer is "consumer = Consumer(conf=configs)". You need to set the `td.connect.websocket.scheme` parameter to "ws" in the configuration. For more subscription API parameters, please refer to [Data Subscription](../../develop/tmq/#create-a-consumer).
```python
import taosws
-consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```
@@ -887,13 +887,13 @@ The `poll` function is used to consume data in tmq. The parameter of the `poll`
```python
while True:
- res = consumer.poll(1)
- if not res:
+ message = consumer.poll(1)
+ if not message:
continue
- err = res.error()
+ err = message.error()
if err is not None:
raise err
- val = res.value()
+ val = message.value()
for block in val:
print(block.fetchall())
@@ -902,16 +902,14 @@ while True:
-The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
+The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` if data is available before the timeout, or `None` if the call times out.
```python
while True:
- res = consumer.poll(timeout=1.0)
- if not res:
+ message = consumer.poll(1)
+ if not message:
continue
- err = res.error()
- if err is not None:
- raise err
+
for block in message:
for row in block:
print(row)
diff --git a/docs/en/12-taos-sql/14-stream.md b/docs/en/12-taos-sql/14-stream.md
index 6f2343d347..e1bf18c854 100644
--- a/docs/en/12-taos-sql/14-stream.md
+++ b/docs/en/12-taos-sql/14-stream.md
@@ -41,16 +41,28 @@ window_clause: {
SESSION(ts_col, tol_val)
| STATE_WINDOW(col)
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)]
+ | EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
+ | COUNT_WINDOW(count_val[, sliding_val])
}
```
`SESSION` indicates a session window, and `tol_val` indicates the maximum range of the time interval. If the time interval between two continuous rows are within the time interval specified by `tol_val` they belong to the same session window; otherwise a new session window is started automatically.
+`EVENT_WINDOW` is an event window, delimited by a start condition and an end condition. The window opens when `start_trigger_condition` evaluates to true and closes when `end_trigger_condition` evaluates to true. `start_trigger_condition` and `end_trigger_condition` can be any conditional expressions supported by TDengine and can reference different columns.
+
+`COUNT_WINDOW` is a counting window, divided by a fixed number of data rows. `count_val` is a constant positive integer that must be greater than or equal to 2 and less than 2147483648; it represents the maximum number of data rows contained in each `COUNT_WINDOW`. When the total number of data rows is not divisible by `count_val`, the last window will contain fewer than `count_val` rows. `sliding_val` is a constant that represents the number of rows the window slides by, similar to `SLIDING` in `INTERVAL`.
+
For example, the following SQL statement creates a stream and automatically creates a supertable named `avg_vol`. The stream has a 1 minute time window that slides forward in 30 second intervals to calculate the average voltage of the meters supertable.
```sql
CREATE STREAM avg_vol_s INTO avg_vol AS
SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+
+CREATE STREAM streams0 INTO streamt0 AS
+SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname EVENT_WINDOW START WITH voltage < 0 END WITH voltage > 9;
+
+CREATE STREAM streams1 IGNORE EXPIRED 1 WATERMARK 100s INTO streamt1 AS
+SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname COUNT_WINDOW(10);
```
## Partitions of Stream
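For reference, a minimal sketch (not part of this patch) of driving the two new window clauses from the C client; the connection parameters, database name, and the `meters`/`voltage` schema are assumptions borrowed from the documentation examples above:

```c
// Sketch only: submit the EVENT_WINDOW and COUNT_WINDOW stream definitions
// through the C client. Connection parameters and object names are placeholders.
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 6030);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect: %s\n", taos_errstr(NULL));
    return 1;
  }

  const char *sqls[] = {
      "CREATE STREAM streams0 INTO streamt0 AS "
      "SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname "
      "EVENT_WINDOW START WITH voltage < 0 END WITH voltage > 9",
      "CREATE STREAM streams1 IGNORE EXPIRED 1 WATERMARK 100s INTO streamt1 AS "
      "SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname COUNT_WINDOW(10)",
  };

  for (int i = 0; i < 2; ++i) {
    TAOS_RES *res = taos_query(conn, sqls[i]);
    if (taos_errno(res) != 0) {
      fprintf(stderr, "create stream failed: %s\n", taos_errstr(res));
    }
    taos_free_result(res);
  }

  taos_close(conn);
  return 0;
}
```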
diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md
index f5a4789976..902e62de73 100644
--- a/docs/en/28-releases/01-tdengine.md
+++ b/docs/en/28-releases/01-tdengine.md
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
import Release from "/components/ReleaseV3";
+## 3.2.3.0
+
+
+
## 3.2.2.0
diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx
index e7160ab094..71dc82316e 100644
--- a/docs/zh/08-connector/30-python.mdx
+++ b/docs/zh/08-connector/30-python.mdx
@@ -856,7 +856,7 @@ taosws `Consumer` API 提供了基于 Websocket 订阅 TMQ 数据的 API。创
```python
import taosws
-consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
```
@@ -896,13 +896,13 @@ Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 flo
```python
while True:
- res = consumer.poll(1)
- if not res:
+ message = consumer.poll(1)
+ if not message:
continue
- err = res.error()
+ err = message.error()
if err is not None:
raise err
- val = res.value()
+ val = message.value()
for block in val:
print(block.fetchall())
@@ -911,16 +911,14 @@ while True:
-Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。消费者必须通过 Message 的 `error()` 方法校验返回数据的 error 信息。
+Consumer API 的 `poll` 方法用于消费数据,`poll` 方法接收一个 float 类型的超时时间,超时时间单位为秒(s),`poll` 方法在超时之前返回一条 Message 类型的数据或超时返回 `None`。
```python
while True:
- res = consumer.poll(timeout=1.0)
- if not res:
+ message = consumer.poll(1)
+ if not message:
continue
- err = res.error()
- if err is not None:
- raise err
+
for block in message:
for row in block:
print(row)
diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md
index 979fc436b9..8868b728f8 100644
--- a/docs/zh/12-taos-sql/14-stream.md
+++ b/docs/zh/12-taos-sql/14-stream.md
@@ -49,10 +49,14 @@ window_clause: {
SESSION(ts_col, tol_val)
| STATE_WINDOW(col)
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)]
+ | EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
+ | COUNT_WINDOW(count_val[, sliding_val])
}
```
其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。
+EVENT_WINDOW 是事件窗口,根据开始条件和结束条件来划定窗口。当 start_trigger_condition 满足时则窗口开始,直到 end_trigger_condition 满足时窗口关闭。 start_trigger_condition 和 end_trigger_condition 可以是任意 TDengine 支持的条件表达式,且可以包含不同的列。
+COUNT_WINDOW 是计数窗口,按固定的数据行数来划分窗口。 count_val 是常量,是正整数,必须大于等于2,小于2147483648。 count_val 表示每个 COUNT_WINDOW 包含的最大数据行数,总数据行数不能整除 count_val 时,最后一个窗口的行数会小于 count_val 。 sliding_val 是常量,表示窗口滑动的数量,类似于 INTERVAL 的 SLIDING 。
窗口的定义与时序数据特色查询中的定义完全相同,详见 [TDengine 特色查询](../distinguished)
@@ -61,6 +65,12 @@ window_clause: {
```sql
CREATE STREAM avg_vol_s INTO avg_vol AS
SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+
+CREATE STREAM streams0 INTO streamt0 AS
+SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname EVENT_WINDOW START WITH voltage < 0 END WITH voltage > 9;
+
+CREATE STREAM streams1 IGNORE EXPIRED 1 WATERMARK 100s INTO streamt1 AS
+SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname COUNT_WINDOW(10);
```
## 流式计算的 partition
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
index b0a81e01a1..1c51f934fe 100644
--- a/docs/zh/28-releases/01-tdengine.md
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
+## 3.2.3.0
+
+
+
## 3.2.2.0
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index c5582ff89e..ed3cf4792a 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -582,8 +582,8 @@ typedef struct {
};
} SSubmitRsp;
-int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
-int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
+// int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
+// int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
// void tFreeSSubmitBlkRsp(void* param);
void tFreeSSubmitRsp(SSubmitRsp* pRsp);
@@ -886,8 +886,8 @@ typedef struct {
int64_t maxStorage;
} SCreateAcctReq, SAlterAcctReq;
-int32_t tSerializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq);
-int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq);
+// int32_t tSerializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq);
+// int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq);
typedef struct {
char user[TSDB_USER_LEN];
@@ -3447,7 +3447,7 @@ int32_t tDeserializeSCreateTagIdxReq(void* buf, int32_t bufLen, SCreateTagIndexR
typedef SMDropSmaReq SDropTagIndexReq;
-int32_t tSerializeSDropTagIdxReq(void* buf, int32_t bufLen, SDropTagIndexReq* pReq);
+// int32_t tSerializeSDropTagIdxReq(void* buf, int32_t bufLen, SDropTagIndexReq* pReq);
int32_t tDeserializeSDropTagIdxReq(void* buf, int32_t bufLen, SDropTagIndexReq* pReq);
typedef struct {
@@ -3568,8 +3568,8 @@ typedef struct {
int8_t igNotExists;
} SMDropFullTextReq;
-int32_t tSerializeSMDropFullTextReq(void* buf, int32_t bufLen, SMDropFullTextReq* pReq);
-int32_t tDeserializeSMDropFullTextReq(void* buf, int32_t bufLen, SMDropFullTextReq* pReq);
+// int32_t tSerializeSMDropFullTextReq(void* buf, int32_t bufLen, SMDropFullTextReq* pReq);
+// int32_t tDeserializeSMDropFullTextReq(void* buf, int32_t bufLen, SMDropFullTextReq* pReq);
typedef struct {
char indexFName[TSDB_INDEX_FNAME_LEN];
@@ -3821,6 +3821,7 @@ typedef struct {
uint32_t phyLen;
char* sql;
char* msg;
+ int8_t source;
} SVDeleteReq;
int32_t tSerializeSVDeleteReq(void* buf, int32_t bufLen, SVDeleteReq* pReq);
@@ -3842,6 +3843,7 @@ typedef struct SDeleteRes {
char tableFName[TSDB_TABLE_NAME_LEN];
char tsColName[TSDB_COL_NAME_LEN];
int64_t ctimeMs; // fill by vnode
+ int8_t source;
} SDeleteRes;
int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h
index 958d63349d..952af3c443 100644
--- a/include/libs/scheduler/scheduler.h
+++ b/include/libs/scheduler/scheduler.h
@@ -78,6 +78,7 @@ typedef struct SSchedulerReq {
void* chkKillParam;
SExecResult* pExecRes;
void** pFetchRes;
+ int8_t source;
} SSchedulerReq;
int32_t schedulerInit(void);
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 64ce735843..5f3761d7b7 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -56,7 +56,6 @@ extern "C" {
#define STREAM_EXEC_T_RESTART_ALL_TASKS (-4)
#define STREAM_EXEC_T_STOP_ALL_TASKS (-5)
#define STREAM_EXEC_T_RESUME_TASK (-6)
-#define STREAM_EXEC_T_UPDATE_TASK_EPSET (-7)
typedef struct SStreamTask SStreamTask;
typedef struct SStreamQueue SStreamQueue;
@@ -783,11 +782,14 @@ bool streamTaskIsAllUpstreamClosed(SStreamTask* pTask);
bool streamTaskSetSchedStatusWait(SStreamTask* pTask);
int8_t streamTaskSetSchedStatusActive(SStreamTask* pTask);
int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask);
-int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t clearRelHalt, bool metaLock);
+int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t clearRelHalt);
int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event);
-int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event);
-void streamTaskRestoreStatus(SStreamTask* pTask);
+
+typedef int32_t (*__state_trans_user_fn)(SStreamTask*, void* param);
+int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, __state_trans_user_fn callbackFn, void* param);
+int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event, __state_trans_user_fn callbackFn, void* param);
+int32_t streamTaskRestoreStatus(SStreamTask* pTask);
int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp,
SRpcHandleInfo* pRpcInfo, int32_t taskId);
diff --git a/include/util/tdef.h b/include/util/tdef.h
index 17254c5405..4fd06d6068 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -188,6 +188,8 @@ typedef enum ELogicConditionType {
LOGIC_COND_TYPE_NOT,
} ELogicConditionType;
+#define TSDB_INT32_ID_LEN 11
+
#define TSDB_NAME_DELIMITER_LEN 1
#define TSDB_UNI_LEN 24
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index 989c6614a6..6650d4c8b3 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -284,6 +284,7 @@ typedef struct SRequestObj {
void* pWrapper;
SMetaData parseMeta;
char* effectiveUser;
+ int8_t source;
} SRequestObj;
typedef struct SSyncQueryParam {
@@ -306,10 +307,10 @@ void doFreeReqResultInfo(SReqResultInfo* pResInfo);
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq);
void syncCatalogFn(SMetaData* pResult, void* param, int32_t code);
-TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly);
+TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t source);
TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly, int64_t reqid);
-void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly);
+void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly, int8_t source);
void taosAsyncQueryImplWithReqid(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly,
int64_t reqid);
void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param);
@@ -354,6 +355,7 @@ SRequestObj* acquireRequest(int64_t rid);
int32_t releaseRequest(int64_t rid);
int32_t removeRequest(int64_t rid);
void doDestroyRequest(void* p);
+int64_t removeFromMostPrevReq(SRequestObj* pRequest);
char* getDbOfConnection(STscObj* pObj);
void setConnectionDB(STscObj* pTscObj, const char* db);
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 1df50a51da..6c20813118 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -385,6 +385,33 @@ int32_t releaseRequest(int64_t rid) { return taosReleaseRef(clientReqRefPool, ri
int32_t removeRequest(int64_t rid) { return taosRemoveRef(clientReqRefPool, rid); }
+/// return the ref id of the most previous (earliest) request in the relation chain
+int64_t removeFromMostPrevReq(SRequestObj* pRequest) {
+ int64_t mostPrevReqRefId = pRequest->self;
+ SRequestObj* pTmp = pRequest;
+ while (pTmp->relation.prevRefId) {
+ pTmp = acquireRequest(pTmp->relation.prevRefId);
+ if (pTmp) {
+ mostPrevReqRefId = pTmp->self;
+ releaseRequest(mostPrevReqRefId);
+ } else {
+ break;
+ }
+ }
+ removeRequest(mostPrevReqRefId);
+ return mostPrevReqRefId;
+}
+
+void destroyNextReq(int64_t nextRefId) {
+ if (nextRefId) {
+ SRequestObj* pObj = acquireRequest(nextRefId);
+ if (pObj) {
+ releaseRequest(nextRefId);
+ releaseRequest(nextRefId);
+ }
+ }
+}
+
void destroySubRequests(SRequestObj *pRequest) {
int32_t reqIdx = -1;
SRequestObj *pReqList[16] = {NULL};
@@ -435,7 +462,7 @@ void doDestroyRequest(void *p) {
uint64_t reqId = pRequest->requestId;
tscTrace("begin to destroy request %" PRIx64 " p:%p", reqId, pRequest);
- destroySubRequests(pRequest);
+ int64_t nextReqRefId = pRequest->relation.nextRefId;
taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
@@ -471,6 +498,7 @@ void doDestroyRequest(void *p) {
taosMemoryFreeClear(pRequest->sqlstr);
taosMemoryFree(pRequest);
tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest);
+ destroyNextReq(nextReqRefId);
}
void destroyRequest(SRequestObj *pRequest) {
@@ -479,7 +507,7 @@ void destroyRequest(SRequestObj *pRequest) {
}
taos_stop_query(pRequest);
- removeRequest(pRequest->self);
+ removeFromMostPrevReq(pRequest);
}
void taosStopQueryImpl(SRequestObj *pRequest) {
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index de4f9f16e4..94da1e1998 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -743,6 +743,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
.chkKillFp = chkRequestKilled,
.chkKillParam = (void*)pRequest->self,
.pExecRes = &res,
+ .source = pRequest->source,
};
int32_t code = schedulerExecJob(&req, &pRequest->body.queryJob);
@@ -1212,6 +1213,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
.chkKillFp = chkRequestKilled,
.chkKillParam = (void*)pRequest->self,
.pExecRes = NULL,
+ .source = pRequest->source,
};
code = schedulerExecJob(&req, &pRequest->body.queryJob);
taosArrayDestroy(pNodeList);
@@ -2475,7 +2477,7 @@ void syncQueryFn(void* param, void* res, int32_t code) {
tsem_post(&pParam->sem);
}
-void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly) {
+void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly, int8_t source) {
if (sql == NULL || NULL == fp) {
terrno = TSDB_CODE_INVALID_PARA;
if (fp) {
@@ -2501,6 +2503,7 @@ void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp,
return;
}
+ pRequest->source = source;
pRequest->body.queryFp = fp;
doAsyncQuery(pRequest, false);
}
@@ -2535,7 +2538,7 @@ void taosAsyncQueryImplWithReqid(uint64_t connId, const char* sql, __taos_async_
doAsyncQuery(pRequest, false);
}
-TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
+TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t source) {
if (NULL == taos) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
@@ -2550,7 +2553,7 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
}
  tsem_init(&param->sem, 0, 0);
- taosAsyncQueryImpl(*(int64_t*)taos, sql, syncQueryFn, param, validateOnly);
+ taosAsyncQueryImpl(*(int64_t*)taos, sql, syncQueryFn, param, validateOnly, source);
  tsem_wait(&param->sem);
SRequestObj* pRequest = NULL;
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 275ca0d2aa..47adb40eaa 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -402,7 +402,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
return pResInfo->userFields;
}
-TAOS_RES *taos_query(TAOS *taos, const char *sql) { return taosQueryImpl(taos, sql, false); }
+TAOS_RES *taos_query(TAOS *taos, const char *sql) { return taosQueryImpl(taos, sql, false, TD_REQ_FROM_APP); }
TAOS_RES *taos_query_with_reqid(TAOS *taos, const char *sql, int64_t reqid) {
return taosQueryImplWithReqid(taos, sql, false, reqid);
}
@@ -828,7 +828,7 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) {
}
int taos_validate_sql(TAOS *taos, const char *sql) {
- TAOS_RES *pObj = taosQueryImpl(taos, sql, true);
+ TAOS_RES *pObj = taosQueryImpl(taos, sql, true, TD_REQ_FROM_APP);
int code = taos_errno(pObj);
@@ -1126,7 +1126,7 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) {
int64_t connId = *(int64_t *)taos;
tscDebug("taos_query_a start with sql:%s", sql);
- taosAsyncQueryImpl(connId, sql, fp, param, false);
+ taosAsyncQueryImpl(connId, sql, fp, param, false, TD_REQ_FROM_APP);
tscDebug("taos_query_a end with sql:%s", sql);
}
@@ -1254,54 +1254,34 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
}
void restartAsyncQuery(SRequestObj *pRequest, int32_t code) {
- int32_t reqIdx = 0;
- SRequestObj *pReqList[16] = {NULL};
- SRequestObj *pUserReq = NULL;
- pReqList[0] = pRequest;
- uint64_t tmpRefId = 0;
- SRequestObj *pTmp = pRequest;
- while (pTmp->relation.prevRefId) {
- tmpRefId = pTmp->relation.prevRefId;
- pTmp = acquireRequest(tmpRefId);
- if (pTmp) {
- pReqList[++reqIdx] = pTmp;
- releaseRequest(tmpRefId);
- } else {
- tscError("prev req ref 0x%" PRIx64 " is not there", tmpRefId);
+ tscInfo("restart request: %s p: %p", pRequest->sqlstr, pRequest);
+ SRequestObj* pUserReq = pRequest;
+ acquireRequest(pRequest->self);
+ while (pUserReq) {
+ if (pUserReq->self == pUserReq->relation.userRefId || pUserReq->relation.userRefId == 0) {
break;
- }
- }
-
- tmpRefId = pRequest->relation.nextRefId;
- while (tmpRefId) {
- pTmp = acquireRequest(tmpRefId);
- if (pTmp) {
- tmpRefId = pTmp->relation.nextRefId;
- removeRequest(pTmp->self);
- releaseRequest(pTmp->self);
} else {
- tscError("next req ref 0x%" PRIx64 " is not there", tmpRefId);
- break;
+ int64_t nextRefId = pUserReq->relation.nextRefId;
+ releaseRequest(pUserReq->self);
+ if (nextRefId) {
+ pUserReq = acquireRequest(nextRefId);
+ }
}
}
-
- for (int32_t i = reqIdx; i >= 0; i--) {
- destroyCtxInRequest(pReqList[i]);
- if (pReqList[i]->relation.userRefId == pReqList[i]->self || 0 == pReqList[i]->relation.userRefId) {
- pUserReq = pReqList[i];
- } else {
- removeRequest(pReqList[i]->self);
- }
- }
-
+ bool hasSubRequest = pUserReq != pRequest || pRequest->relation.prevRefId != 0;
if (pUserReq) {
+ destroyCtxInRequest(pUserReq);
pUserReq->prevCode = code;
memset(&pUserReq->relation, 0, sizeof(pUserReq->relation));
} else {
- tscError("user req is missing");
+ tscError("User req is missing");
+ removeFromMostPrevReq(pRequest);
return;
}
-
+ if (hasSubRequest)
+ removeFromMostPrevReq(pRequest);
+ else
+ releaseRequest(pUserReq->self);
doAsyncQuery(pUserReq, true);
}
diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c
index f143624bab..adc8c361cd 100644
--- a/source/client/src/clientRawBlockWrite.c
+++ b/source/client/src/clientRawBlockWrite.c
@@ -1256,7 +1256,7 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
snprintf(sql, sizeof(sql), "delete from `%s` where `%s` >= %" PRId64 " and `%s` <= %" PRId64, req.tableFName,
req.tsColName, req.skey, req.tsColName, req.ekey);
- TAOS_RES* res = taos_query(taos, sql);
+ TAOS_RES* res = taosQueryImpl(taos, sql, false, TD_REQ_FROM_TAOX);
SRequestObj* pRequest = (SRequestObj*)res;
code = pRequest->code;
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_PAR_GET_META_ERROR) {
diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c
index a893b27896..0270ae9657 100644
--- a/source/client/src/clientTmq.c
+++ b/source/client/src/clientTmq.c
@@ -1010,19 +1010,8 @@ int32_t tmq_unsubscribe(tmq_t* tmq) {
}
taosSsleep(2); // sleep 2s for hb to send offset and rows to server
- int32_t rsp;
- int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
- while (1) {
- rsp = tmq_subscribe(tmq, lst);
- if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) {
- break;
- } else {
- retryCnt++;
- taosMsleep(500);
- }
- }
-
+ int32_t rsp = tmq_subscribe(tmq, lst);
tmq_list_destroy(lst);
return rsp;
}
@@ -1272,10 +1261,9 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
}
int32_t retryCnt = 0;
- while (syncAskEp(tmq) != 0) {
- if (retryCnt++ > MAX_RETRY_COUNT) {
+ while ((code = syncAskEp(tmq)) != 0) {
+ if (retryCnt++ > MAX_RETRY_COUNT || code == TSDB_CODE_MND_CONSUMER_NOT_EXIST) {
tscError("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry more than 2 minutes", tmq->consumerId);
- code = TSDB_CODE_MND_CONSUMER_NOT_READY;
goto FAIL;
}
@@ -2148,26 +2136,19 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
if (tmq->status == TMQ_CONSUMER_STATUS__READY) {
// if auto commit is set, commit before close consumer. Otherwise, do nothing.
if (tmq->autoCommit) {
- int32_t rsp = tmq_commit_sync(tmq, NULL);
- if (rsp != 0) {
- return rsp;
+ int32_t code = tmq_commit_sync(tmq, NULL);
+ if (code != 0) {
+ return code;
}
}
taosSsleep(2); // sleep 2s for hb to send offset and rows to server
- int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
- while (1) {
- int32_t rsp = tmq_subscribe(tmq, lst);
- if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) {
- break;
- } else {
- retryCnt++;
- taosMsleep(500);
- }
- }
-
+ int32_t code = tmq_subscribe(tmq, lst);
tmq_list_destroy(lst);
+ if (code != 0) {
+ return code;
+ }
} else {
tscInfo("consumer:0x%" PRIx64 " not in ready state, close it directly", tmq->consumerId);
}
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index 2412f10db4..bcfab8c59c 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -458,20 +458,21 @@ int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* p
}
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
+ int32_t newLen = pSource->varmeta.length;
memcpy(pColumnInfoData->varmeta.offset, pSource->varmeta.offset, sizeof(int32_t) * numOfRows);
- if (pColumnInfoData->varmeta.allocLen < pSource->varmeta.length) {
- char* tmp = taosMemoryRealloc(pColumnInfoData->pData, pSource->varmeta.length);
+ if (pColumnInfoData->varmeta.allocLen < newLen) {
+ char* tmp = taosMemoryRealloc(pColumnInfoData->pData, newLen);
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pColumnInfoData->pData = tmp;
- pColumnInfoData->varmeta.allocLen = pSource->varmeta.length;
+ pColumnInfoData->varmeta.allocLen = newLen;
}
- pColumnInfoData->varmeta.length = pSource->varmeta.length;
+ pColumnInfoData->varmeta.length = newLen;
if (pColumnInfoData->pData != NULL && pSource->pData != NULL) {
- memcpy(pColumnInfoData->pData, pSource->pData, pSource->varmeta.length);
+ memcpy(pColumnInfoData->pData, pSource->pData, newLen);
}
} else {
memcpy(pColumnInfoData->nullbitmap, pSource->nullbitmap, BitmapLen(numOfRows));
@@ -1857,7 +1858,29 @@ int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n) {
}
static void colDataKeepFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
+ if (n >= total || n == 0) return;
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
+ if (pColInfoData->varmeta.length != 0) {
+ int32_t newLen = pColInfoData->varmeta.offset[n];
+ if (-1 == newLen) {
+ for (int i = n - 1; i >= 0; --i) {
+ newLen = pColInfoData->varmeta.offset[i];
+ if (newLen != -1) {
+ if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) {
+ newLen += getJsonValueLen(pColInfoData->pData + newLen);
+ } else {
+ newLen += varDataTLen(pColInfoData->pData + newLen);
+ }
+ break;
+ }
+ }
+ }
+ if (newLen <= -1) {
+ uFatal("colDataKeepFirstNRows: newLen:%d old:%d", newLen, pColInfoData->varmeta.length);
+ } else {
+ pColInfoData->varmeta.length = newLen;
+ }
+ }
// pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, 0, n);
memset(&pColInfoData->varmeta.offset[n], 0, total - n);
}
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index c5a26c5c10..fde8313228 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -58,7 +58,7 @@ int32_t tsNumOfMnodeQueryThreads = 4;
int32_t tsNumOfMnodeFetchThreads = 1;
int32_t tsNumOfMnodeReadThreads = 1;
int32_t tsNumOfVnodeQueryThreads = 4;
-float tsRatioOfVnodeStreamThreads = 1.5F;
+float tsRatioOfVnodeStreamThreads = 0.5F;
int32_t tsNumOfVnodeFetchThreads = 4;
int32_t tsNumOfVnodeRsmaThreads = 2;
int32_t tsNumOfQnodeQueryThreads = 4;
@@ -586,7 +586,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfSupportVnodes = tsNumOfCores * 2;
tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2);
- if (cfgAddInt32(pCfg, "supportVnodes", tsNumOfSupportVnodes, 0, 4096, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
+ if (cfgAddInt32(pCfg, "supportVnodes", tsNumOfSupportVnodes, 0, 4096, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT) != 0)
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index a1d279b494..9138d7c983 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -1009,19 +1009,19 @@ int32_t tDeserializeSCreateTagIdxReq(void *buf, int32_t bufLen, SCreateTagIndexR
tDecoderClear(&decoder);
return 0;
}
-int32_t tSerializeSDropTagIdxReq(void *buf, int32_t bufLen, SDropTagIndexReq *pReq) {
- SEncoder encoder = {0};
- tEncoderInit(&encoder, buf, bufLen);
- if (tStartEncode(&encoder) < 0) return -1;
- tEndEncode(&encoder);
+// int32_t tSerializeSDropTagIdxReq(void *buf, int32_t bufLen, SDropTagIndexReq *pReq) {
+// SEncoder encoder = {0};
+// tEncoderInit(&encoder, buf, bufLen);
+// if (tStartEncode(&encoder) < 0) return -1;
+// tEndEncode(&encoder);
- if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
- if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
+// if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
+// if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
- int32_t tlen = encoder.pos;
- tEncoderClear(&encoder);
- return tlen;
-}
+// int32_t tlen = encoder.pos;
+// tEncoderClear(&encoder);
+// return tlen;
+// }
int32_t tDeserializeSDropTagIdxReq(void *buf, int32_t bufLen, SDropTagIndexReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
@@ -1035,6 +1035,7 @@ int32_t tDeserializeSDropTagIdxReq(void *buf, int32_t bufLen, SDropTagIndexReq *
return 0;
}
+
int32_t tSerializeSMCreateFullTextReq(void *buf, int32_t bufLen, SMCreateFullTextReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@@ -1059,32 +1060,32 @@ void tFreeSMCreateFullTextReq(SMCreateFullTextReq *pReq) {
// impl later
return;
}
-int32_t tSerializeSMDropFullTextReq(void *buf, int32_t bufLen, SMDropFullTextReq *pReq) {
- SEncoder encoder = {0};
- tEncoderInit(&encoder, buf, bufLen);
+// int32_t tSerializeSMDropFullTextReq(void *buf, int32_t bufLen, SMDropFullTextReq *pReq) {
+// SEncoder encoder = {0};
+// tEncoderInit(&encoder, buf, bufLen);
- if (tStartEncode(&encoder) < 0) return -1;
+// if (tStartEncode(&encoder) < 0) return -1;
- if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
+// if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
- if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
+// if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
- tEndEncode(&encoder);
- int32_t tlen = encoder.pos;
- tEncoderClear(&encoder);
- return tlen;
-}
-int32_t tDeserializeSMDropFullTextReq(void *buf, int32_t bufLen, SMDropFullTextReq *pReq) {
- SDecoder decoder = {0};
- tDecoderInit(&decoder, buf, bufLen);
- if (tStartDecode(&decoder) < 0) return -1;
- if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
- if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
+// tEndEncode(&encoder);
+// int32_t tlen = encoder.pos;
+// tEncoderClear(&encoder);
+// return tlen;
+// }
+// int32_t tDeserializeSMDropFullTextReq(void *buf, int32_t bufLen, SMDropFullTextReq *pReq) {
+// SDecoder decoder = {0};
+// tDecoderInit(&decoder, buf, bufLen);
+// if (tStartDecode(&decoder) < 0) return -1;
+// if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
+// if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
- tEndDecode(&decoder);
- tDecoderClear(&decoder);
- return 0;
-}
+// tEndDecode(&decoder);
+// tDecoderClear(&decoder);
+// return 0;
+// }
int32_t tSerializeSNotifyReq(void *buf, int32_t bufLen, SNotifyReq *pReq) {
SEncoder encoder = {0};
@@ -1474,44 +1475,44 @@ void tFreeSStatisReq(SStatisReq *pReq) {
taosMemoryFreeClear(pReq->pCont);
}
-int32_t tSerializeSCreateAcctReq(void *buf, int32_t bufLen, SCreateAcctReq *pReq) {
- SEncoder encoder = {0};
- tEncoderInit(&encoder, buf, bufLen);
+// int32_t tSerializeSCreateAcctReq(void *buf, int32_t bufLen, SCreateAcctReq *pReq) {
+// SEncoder encoder = {0};
+// tEncoderInit(&encoder, buf, bufLen);
- if (tStartEncode(&encoder) < 0) return -1;
- if (tEncodeCStr(&encoder, pReq->user) < 0) return -1;
- if (tEncodeCStr(&encoder, pReq->pass) < 0) return -1;
- if (tEncodeI32(&encoder, pReq->maxUsers) < 0) return -1;
- if (tEncodeI32(&encoder, pReq->maxDbs) < 0) return -1;
- if (tEncodeI32(&encoder, pReq->maxTimeSeries) < 0) return -1;
- if (tEncodeI32(&encoder, pReq->maxStreams) < 0) return -1;
- if (tEncodeI32(&encoder, pReq->accessState) < 0) return -1;
- if (tEncodeI64(&encoder, pReq->maxStorage) < 0) return -1;
- tEndEncode(&encoder);
+// if (tStartEncode(&encoder) < 0) return -1;
+// if (tEncodeCStr(&encoder, pReq->user) < 0) return -1;
+// if (tEncodeCStr(&encoder, pReq->pass) < 0) return -1;
+// if (tEncodeI32(&encoder, pReq->maxUsers) < 0) return -1;
+// if (tEncodeI32(&encoder, pReq->maxDbs) < 0) return -1;
+// if (tEncodeI32(&encoder, pReq->maxTimeSeries) < 0) return -1;
+// if (tEncodeI32(&encoder, pReq->maxStreams) < 0) return -1;
+// if (tEncodeI32(&encoder, pReq->accessState) < 0) return -1;
+// if (tEncodeI64(&encoder, pReq->maxStorage) < 0) return -1;
+// tEndEncode(&encoder);
- int32_t tlen = encoder.pos;
- tEncoderClear(&encoder);
- return tlen;
-}
+// int32_t tlen = encoder.pos;
+// tEncoderClear(&encoder);
+// return tlen;
+// }
-int32_t tDeserializeSCreateAcctReq(void *buf, int32_t bufLen, SCreateAcctReq *pReq) {
- SDecoder decoder = {0};
- tDecoderInit(&decoder, buf, bufLen);
+// int32_t tDeserializeSCreateAcctReq(void *buf, int32_t bufLen, SCreateAcctReq *pReq) {
+// SDecoder decoder = {0};
+// tDecoderInit(&decoder, buf, bufLen);
- if (tStartDecode(&decoder) < 0) return -1;
- if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1;
- if (tDecodeCStrTo(&decoder, pReq->pass) < 0) return -1;
- if (tDecodeI32(&decoder, &pReq->maxUsers) < 0) return -1;
- if (tDecodeI32(&decoder, &pReq->maxDbs) < 0) return -1;
- if (tDecodeI32(&decoder, &pReq->maxTimeSeries) < 0) return -1;
- if (tDecodeI32(&decoder, &pReq->maxStreams) < 0) return -1;
- if (tDecodeI32(&decoder, &pReq->accessState) < 0) return -1;
- if (tDecodeI64(&decoder, &pReq->maxStorage) < 0) return -1;
- tEndDecode(&decoder);
+// if (tStartDecode(&decoder) < 0) return -1;
+// if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1;
+// if (tDecodeCStrTo(&decoder, pReq->pass) < 0) return -1;
+// if (tDecodeI32(&decoder, &pReq->maxUsers) < 0) return -1;
+// if (tDecodeI32(&decoder, &pReq->maxDbs) < 0) return -1;
+// if (tDecodeI32(&decoder, &pReq->maxTimeSeries) < 0) return -1;
+// if (tDecodeI32(&decoder, &pReq->maxStreams) < 0) return -1;
+// if (tDecodeI32(&decoder, &pReq->accessState) < 0) return -1;
+// if (tDecodeI64(&decoder, &pReq->maxStorage) < 0) return -1;
+// tEndDecode(&decoder);
- tDecoderClear(&decoder);
- return 0;
-}
+// tDecoderClear(&decoder);
+// return 0;
+// }
int32_t tSerializeSDropUserReq(void *buf, int32_t bufLen, SDropUserReq *pReq) {
SEncoder encoder = {0};
@@ -7191,6 +7192,7 @@ int32_t tSerializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
if (tEncodeU32(&encoder, pReq->sqlLen) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
if (tEncodeBinary(&encoder, pReq->msg, pReq->phyLen) < 0) return -1;
+ if (tEncodeI8(&encoder, pReq->source) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -7227,6 +7229,9 @@ int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
if (tDecodeBinaryAlloc(&decoder, (void **)&pReq->msg, &msgLen) < 0) return -1;
pReq->phyLen = msgLen;
+ if (!tDecodeIsEnd(&decoder)) {
+ if (tDecodeI8(&decoder, &pReq->source) < 0) return -1;
+ }
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -7934,64 +7939,64 @@ static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBl
return 0;
}
-static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
- if (tStartDecode(pDecoder) < 0) return -1;
+// static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
+// if (tStartDecode(pDecoder) < 0) return -1;
- if (tDecodeI32(pDecoder, &pBlock->code) < 0) return -1;
- if (tDecodeI64(pDecoder, &pBlock->uid) < 0) return -1;
- pBlock->tblFName = taosMemoryCalloc(TSDB_TABLE_FNAME_LEN, 1);
- if (NULL == pBlock->tblFName) return -1;
- if (tDecodeCStrTo(pDecoder, pBlock->tblFName) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
- if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
+// if (tDecodeI32(pDecoder, &pBlock->code) < 0) return -1;
+// if (tDecodeI64(pDecoder, &pBlock->uid) < 0) return -1;
+// pBlock->tblFName = taosMemoryCalloc(TSDB_TABLE_FNAME_LEN, 1);
+// if (NULL == pBlock->tblFName) return -1;
+// if (tDecodeCStrTo(pDecoder, pBlock->tblFName) < 0) return -1;
+// if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1;
+// if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
+// if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
- int32_t meta = 0;
- if (tDecodeI32(pDecoder, &meta) < 0) return -1;
- if (meta) {
- pBlock->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
- if (NULL == pBlock->pMeta) return -1;
- if (tDecodeSTableMetaRsp(pDecoder, pBlock->pMeta) < 0) return -1;
- } else {
- pBlock->pMeta = NULL;
- }
+// int32_t meta = 0;
+// if (tDecodeI32(pDecoder, &meta) < 0) return -1;
+// if (meta) {
+// pBlock->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+// if (NULL == pBlock->pMeta) return -1;
+// if (tDecodeSTableMetaRsp(pDecoder, pBlock->pMeta) < 0) return -1;
+// } else {
+// pBlock->pMeta = NULL;
+// }
- tEndDecode(pDecoder);
- return 0;
-}
+// tEndDecode(pDecoder);
+// return 0;
+// }
-int32_t tEncodeSSubmitRsp(SEncoder *pEncoder, const SSubmitRsp *pRsp) {
- int32_t nBlocks = taosArrayGetSize(pRsp->pArray);
+// int32_t tEncodeSSubmitRsp(SEncoder *pEncoder, const SSubmitRsp *pRsp) {
+// int32_t nBlocks = taosArrayGetSize(pRsp->pArray);
- if (tStartEncode(pEncoder) < 0) return -1;
+// if (tStartEncode(pEncoder) < 0) return -1;
- if (tEncodeI32v(pEncoder, pRsp->numOfRows) < 0) return -1;
- if (tEncodeI32v(pEncoder, pRsp->affectedRows) < 0) return -1;
- if (tEncodeI32v(pEncoder, nBlocks) < 0) return -1;
- for (int32_t iBlock = 0; iBlock < nBlocks; iBlock++) {
- if (tEncodeSSubmitBlkRsp(pEncoder, (SSubmitBlkRsp *)taosArrayGet(pRsp->pArray, iBlock)) < 0) return -1;
- }
+// if (tEncodeI32v(pEncoder, pRsp->numOfRows) < 0) return -1;
+// if (tEncodeI32v(pEncoder, pRsp->affectedRows) < 0) return -1;
+// if (tEncodeI32v(pEncoder, nBlocks) < 0) return -1;
+// for (int32_t iBlock = 0; iBlock < nBlocks; iBlock++) {
+// if (tEncodeSSubmitBlkRsp(pEncoder, (SSubmitBlkRsp *)taosArrayGet(pRsp->pArray, iBlock)) < 0) return -1;
+// }
- tEndEncode(pEncoder);
- return 0;
-}
+// tEndEncode(pEncoder);
+// return 0;
+// }
-int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
- if (tStartDecode(pDecoder) < 0) return -1;
+// int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
+// if (tStartDecode(pDecoder) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pRsp->numOfRows) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pRsp->affectedRows) < 0) return -1;
- if (tDecodeI32v(pDecoder, &pRsp->nBlocks) < 0) return -1;
- pRsp->pBlocks = taosMemoryCalloc(pRsp->nBlocks, sizeof(*pRsp->pBlocks));
- if (pRsp->pBlocks == NULL) return -1;
- for (int32_t iBlock = 0; iBlock < pRsp->nBlocks; iBlock++) {
- if (tDecodeSSubmitBlkRsp(pDecoder, pRsp->pBlocks + iBlock) < 0) return -1;
- }
+// if (tDecodeI32v(pDecoder, &pRsp->numOfRows) < 0) return -1;
+// if (tDecodeI32v(pDecoder, &pRsp->affectedRows) < 0) return -1;
+// if (tDecodeI32v(pDecoder, &pRsp->nBlocks) < 0) return -1;
+// pRsp->pBlocks = taosMemoryCalloc(pRsp->nBlocks, sizeof(*pRsp->pBlocks));
+// if (pRsp->pBlocks == NULL) return -1;
+// for (int32_t iBlock = 0; iBlock < pRsp->nBlocks; iBlock++) {
+// if (tDecodeSSubmitBlkRsp(pDecoder, pRsp->pBlocks + iBlock) < 0) return -1;
+// }
- tEndDecode(pDecoder);
- tDecoderClear(pDecoder);
- return 0;
-}
+// tEndDecode(pDecoder);
+// tDecoderClear(pDecoder);
+// return 0;
+// }
// void tFreeSSubmitBlkRsp(void *param) {
// if (NULL == param) {
@@ -8426,6 +8431,7 @@ int32_t tEncodeDeleteRes(SEncoder *pCoder, const SDeleteRes *pRes) {
if (tEncodeCStr(pCoder, pRes->tableFName) < 0) return -1;
if (tEncodeCStr(pCoder, pRes->tsColName) < 0) return -1;
if (tEncodeI64(pCoder, pRes->ctimeMs) < 0) return -1;
+ if (tEncodeI8(pCoder, pRes->source) < 0) return -1;
return 0;
}
@@ -8450,6 +8456,9 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
if (!tDecodeIsEnd(pCoder)) {
if (tDecodeI64(pCoder, &pRes->ctimeMs) < 0) return -1;
}
+ if (!tDecodeIsEnd(pCoder)) {
+ if (tDecodeI8(pCoder, &pRes->source) < 0) return -1;
+ }
return 0;
}
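The two `tDecodeIsEnd()` guards above follow the codec's usual approach of appending new fields at the end of a message and decoding them only when bytes remain, so a newer reader stays compatible with payloads produced by older writers. Below is a standalone sketch of the same idea using a toy buffer codec rather than TDengine's `SEncoder`/`SDecoder`; all names in the sketch are hypothetical.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Toy codec: a flat byte buffer with a write length and a read cursor.
typedef struct { uint8_t buf[64]; size_t len, pos; } Coder;

static void    putI8(Coder *c, int8_t v)   { c->buf[c->len++] = (uint8_t)v; }
static void    putI32(Coder *c, int32_t v) { memcpy(c->buf + c->len, &v, 4); c->len += 4; }
static int8_t  getI8(Coder *c)             { return (int8_t)c->buf[c->pos++]; }
static int32_t getI32(Coder *c)            { int32_t v; memcpy(&v, c->buf + c->pos, 4); c->pos += 4; return v; }
static int     isEnd(const Coder *c)       { return c->pos >= c->len; }

// Hypothetical message: an original field plus a trailing field added later.
typedef struct { int32_t affectedRows; int8_t source; } DeleteResV2;

int main(void) {
  // Old writer: only knows about affectedRows.
  Coder oldMsg = {0};
  putI32(&oldMsg, 7);

  // New writer: appends the trailing `source` field.
  Coder newMsg = {0};
  putI32(&newMsg, 7);
  putI8(&newMsg, 1);

  // New reader decodes both: the trailing field is read only if bytes remain,
  // which is the role tDecodeIsEnd() plays in the patch above.
  Coder *msgs[2] = {&oldMsg, &newMsg};
  for (int i = 0; i < 2; ++i) {
    DeleteResV2 r = {0};
    r.affectedRows = getI32(msgs[i]);
    if (!isEnd(msgs[i])) r.source = getI8(msgs[i]);
    printf("affectedRows=%d source=%d\n", r.affectedRows, r.source);
  }
  return 0;
}
```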
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index 1a31f08801..77760b16f4 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -345,6 +345,7 @@ int32_t dmInitClient(SDnode *pDnode) {
rpcInit.parent = pDnode;
rpcInit.rfp = rpcRfp;
rpcInit.compressSize = tsCompressMsgSize;
+ rpcInit.dfp = destroyAhandle;
rpcInit.retryMinInterval = tsRedirectPeriod;
rpcInit.retryStepFactor = tsRedirectFactor;
diff --git a/source/dnode/mnode/impl/inc/mndStream.h b/source/dnode/mnode/impl/inc/mndStream.h
index 57fd187da3..5307ff4b05 100644
--- a/source/dnode/mnode/impl/inc/mndStream.h
+++ b/source/dnode/mnode/impl/inc/mndStream.h
@@ -111,7 +111,7 @@ STrans *doCreateTrans(SMnode *pMnode, SStreamObj *pStream, SRpcMsg *pReq, const
int32_t mndPersistTransLog(SStreamObj *pStream, STrans *pTrans, int32_t status);
SSdbRaw *mndStreamActionEncode(SStreamObj *pStream);
void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo);
-int32_t mndStreamSetUpdateEpsetAction(SStreamObj *pStream, SVgroupChangeInfo *pInfo, STrans *pTrans);
+int32_t mndStreamSetUpdateEpsetAction(SMnode *pMnode, SStreamObj *pStream, SVgroupChangeInfo *pInfo, STrans *pTrans);
SStreamObj *mndGetStreamObj(SMnode *pMnode, int64_t streamId);
int32_t extractNodeEpset(SMnode *pMnode, SEpSet *pEpSet, bool *hasEpset, int32_t taskId, int32_t nodeId);
diff --git a/source/dnode/mnode/impl/src/mndCompact.c b/source/dnode/mnode/impl/src/mndCompact.c
index 2d714596a9..75b4531dbb 100644
--- a/source/dnode/mnode/impl/src/mndCompact.c
+++ b/source/dnode/mnode/impl/src/mndCompact.c
@@ -454,7 +454,7 @@ int32_t mndProcessKillCompactReq(SRpcMsg *pReq){
code = TSDB_CODE_ACTION_IN_PROGRESS;
- char obj[MND_COMPACT_ID_LEN] = {0};
+ char obj[TSDB_INT32_ID_LEN] = {0};
sprintf(obj, "%d", pCompact->compactId);
auditRecord(pReq, pMnode->clusterId, "killCompact", pCompact->dbname, obj, killCompactReq.sql, killCompactReq.sqlLen);
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index 79a5f5fd83..befb6d3521 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -610,7 +610,7 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
for(int32_t j = 0; j < tagSize; j++){
SJson* item = tjsonGetArrayItem(arrayTag, j);
- *(labels + j) = taosMemoryMalloc(MONITOR_TAG_NAME_LEN);
+ *(labels + j) = taosMemoryMalloc(MONITOR_TAG_NAME_LEN);
tjsonGetStringValue(item, "name", *(labels + j));
*(sample_labels + j) = taosMemoryMalloc(MONITOR_TAG_VALUE_LEN);
@@ -626,7 +626,7 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
for(int32_t j = 0; j < metricLen; j++){
SJson *item = tjsonGetArrayItem(metrics, j);
- char name[MONITOR_METRIC_NAME_LEN] = {0};
+ char name[MONITOR_METRIC_NAME_LEN] = {0};
tjsonGetStringValue(item, "name", name);
double value = 0;
@@ -636,7 +636,7 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
tjsonGetDoubleValue(item, "type", &type);
int32_t metricNameLen = strlen(name) + strlen(tableName) + 2;
- char* metricName = taosMemoryMalloc(metricNameLen);
+ char* metricName = taosMemoryMalloc(metricNameLen);
memset(metricName, 0, metricNameLen);
sprintf(metricName, "%s:%s", tableName, name);
@@ -669,7 +669,7 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
else{
mTrace("get metric from registry:%p", metric);
}
-
+
if(type == 0){
taos_counter_add(metric, value, (const char**)sample_labels);
}
@@ -689,7 +689,7 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
taosMemoryFreeClear(labels);
}
}
-
+
}
code = 0;
@@ -1409,24 +1409,6 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
if (strcasecmp(cfgReq.config, "resetlog") == 0) {
strcpy(dcfgReq.config, "resetlog");
#ifdef TD_ENTERPRISE
- } else if (strncasecmp(cfgReq.config, "supportvnodes", 13) == 0) {
- int32_t optLen = strlen("supportvnodes");
- int32_t flag = -1;
- int32_t code = mndMCfgGetValInt32(&cfgReq, optLen, &flag);
- if (code < 0) return code;
-
- if (flag < 0 || flag > 4096) {
- mError("dnode:%d, failed to config supportVnodes since value:%d. Valid range: [0, 4096]", cfgReq.dnodeId, flag);
- terrno = TSDB_CODE_OUT_OF_RANGE;
- goto _err_out;
- }
- if (flag == 0) {
- flag = tsNumOfCores * 2;
- }
- flag = TMAX(flag, 2);
-
- strcpy(dcfgReq.config, "supportvnodes");
- snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag);
} else if (strncasecmp(cfgReq.config, "s3blocksize", 11) == 0) {
int32_t optLen = strlen("s3blocksize");
int32_t flag = -1;
diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c
index b188d314d9..69a0bd477d 100644
--- a/source/dnode/mnode/impl/src/mndMain.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -709,7 +709,8 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {
int32_t code = syncProcessMsg(pMgmt->sync, pMsg);
if (code != 0) {
- mGError("vgId:1, failed to process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr());
+ mGError("vgId:1, failed to process sync msg:%p type:%s, errno: %s, code:0x%x", pMsg, TMSG_INFO(pMsg->msgType),
+ terrstr(), code);
}
return code;
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index 3ef2f64df7..50e3dd0d03 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -721,6 +721,8 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
goto _OVER;
}
+ // add into buffer firstly,
+ // to make sure that when the hb from the vnode arrives, the newly created tasks are already in the task map.
taosThreadMutexLock(&execInfo.lock);
mDebug("stream stream:%s tasks register into node list", createReq.name);
saveStreamTasksInfo(&streamObj, &execInfo);
@@ -1811,7 +1813,7 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange
mDebug("stream:0x%" PRIx64 " %s involved node changed, create update trans, transId:%d", pStream->uid,
pStream->name, pTrans->id);
- int32_t code = mndStreamSetUpdateEpsetAction(pStream, pChangeInfo, pTrans);
+ int32_t code = mndStreamSetUpdateEpsetAction(pMnode, pStream, pChangeInfo, pTrans);
// todo: not continue, drop all and retry again
if (code != TSDB_CODE_SUCCESS) {
diff --git a/source/dnode/mnode/impl/src/mndStreamUtil.c b/source/dnode/mnode/impl/src/mndStreamUtil.c
index 1ae85a2cc6..2b8fcee9fd 100644
--- a/source/dnode/mnode/impl/src/mndStreamUtil.c
+++ b/source/dnode/mnode/impl/src/mndStreamUtil.c
@@ -462,14 +462,22 @@ static int32_t doBuildStreamTaskUpdateMsg(void **pBuf, int32_t *pLen, SVgroupCha
return TSDB_CODE_SUCCESS;
}
-static int32_t doSetUpdateTaskAction(STrans *pTrans, SStreamTask *pTask, SVgroupChangeInfo *pInfo) {
+static int32_t doSetUpdateTaskAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask, SVgroupChangeInfo *pInfo) {
void *pBuf = NULL;
int32_t len = 0;
streamTaskUpdateEpsetInfo(pTask, pInfo->pUpdateNodeList);
doBuildStreamTaskUpdateMsg(&pBuf, &len, pInfo, pTask->info.nodeId, &pTask->id, pTrans->id);
- int32_t code = setTransAction(pTrans, pBuf, len, TDMT_VND_STREAM_TASK_UPDATE, &pTask->info.epSet, 0);
+ SEpSet epset = {0};
+ bool hasEpset = false;
+ int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId);
+ if (code != TSDB_CODE_SUCCESS || !hasEpset) {
+ terrno = code;
+ return code;
+ }
+
+ code = setTransAction(pTrans, pBuf, len, TDMT_VND_STREAM_TASK_UPDATE, &epset, TSDB_CODE_VND_INVALID_VGROUP_ID);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(pBuf);
}
@@ -478,14 +486,14 @@ static int32_t doSetUpdateTaskAction(STrans *pTrans, SStreamTask *pTask, SVgroup
}
// build trans to update the epset
-int32_t mndStreamSetUpdateEpsetAction(SStreamObj *pStream, SVgroupChangeInfo *pInfo, STrans *pTrans) {
+int32_t mndStreamSetUpdateEpsetAction(SMnode *pMnode, SStreamObj *pStream, SVgroupChangeInfo *pInfo, STrans *pTrans) {
mDebug("stream:0x%" PRIx64 " set tasks epset update action", pStream->uid);
taosWLockLatch(&pStream->lock);
SStreamTaskIter *pIter = createStreamTaskIter(pStream);
while (streamTaskIterNextTask(pIter)) {
SStreamTask *pTask = streamTaskIterGetCurrent(pIter);
- int32_t code = doSetUpdateTaskAction(pTrans, pTask, pInfo);
+ int32_t code = doSetUpdateTaskAction(pMnode, pTrans, pTask, pInfo);
if (code != TSDB_CODE_SUCCESS) {
destroyStreamTaskIter(pIter);
taosWUnLockLatch(&pStream->lock);
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 011e62cb89..47900d540c 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -917,6 +917,22 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask
}
}
+int32_t handleStep2Async(SStreamTask* pStreamTask, void* param) {
+ STQ* pTq = param;
+
+ SStreamMeta* pMeta = pStreamTask->pMeta;
+ STaskId hId = pStreamTask->hTaskInfo.id;
+ SStreamTask* pTask = streamMetaAcquireTask(pStreamTask->pMeta, hId.streamId, hId.taskId);
+ if (pTask == NULL) {
+ // todo handle error
+ }
+
+ doStartFillhistoryStep2(pTask, pStreamTask, pTq);
+
+ streamMetaReleaseTask(pMeta, pTask);
+ return 0;
+}
+
// this function should be executed by only one thread, so we set an sentinel to protect this function
int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
SStreamScanHistoryReq* pReq = (SStreamScanHistoryReq*)pMsg->pCont;
@@ -1007,37 +1023,27 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
// the following procedure should be executed, no matter status is stop/pause or not
tqDebug("s-task:%s scan-history(step 1) ended, elapsed time:%.2fs", id, pTask->execInfo.step1El);
- if (pTask->info.fillHistory) {
- SStreamTask* pStreamTask = NULL;
+ ASSERT(pTask->info.fillHistory == 1);
- // 1. get the related stream task
- pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
- if (pStreamTask == NULL) {
- tqError("failed to find s-task:0x%" PRIx64 ", it may have been destroyed, drop related fill-history task:%s",
- pTask->streamTaskId.taskId, pTask->id.idStr);
+ // 1. get the related stream task
+ SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
+ if (pStreamTask == NULL) {
+ tqError("failed to find s-task:0x%" PRIx64 ", it may have been destroyed, drop related fill-history task:%s",
+ pTask->streamTaskId.taskId, pTask->id.idStr);
tqDebug("s-task:%s fill-history task set status to be dropping and drop it", id);
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id, 0);
- atomic_store_32(&pTask->status.inScanHistorySentinel, 0);
- streamMetaReleaseTask(pMeta, pTask);
- return -1;
- }
-
- ASSERT(pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE);
-
- code = streamTaskHandleEvent(pStreamTask->status.pSM, TASK_EVENT_HALT);
- if (code == TSDB_CODE_SUCCESS) {
- doStartFillhistoryStep2(pTask, pStreamTask, pTq);
- } else {
- tqError("s-task:%s failed to halt s-task:%s, not launch step2", id, pStreamTask->id.idStr);
- }
-
- streamMetaReleaseTask(pMeta, pStreamTask);
- } else {
- ASSERT(0);
+ atomic_store_32(&pTask->status.inScanHistorySentinel, 0);
+ streamMetaReleaseTask(pMeta, pTask);
+ return -1;
}
+ ASSERT(pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE);
+ code = streamTaskHandleEventAsync(pStreamTask->status.pSM, TASK_EVENT_HALT, handleStep2Async, pTq);
+
+ streamMetaReleaseTask(pMeta, pStreamTask);
+
atomic_store_32(&pTask->status.inScanHistorySentinel, 0);
streamMetaReleaseTask(pMeta, pTask);
return code;
diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c
index 280c110711..73508202d9 100644
--- a/source/dnode/vnode/src/tq/tqStreamTask.c
+++ b/source/dnode/vnode/src/tq/tqStreamTask.c
@@ -28,8 +28,8 @@ static int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDurat
// extract data blocks(submit/delete) from WAL, and add them into the input queue for all the sources tasks.
int32_t tqScanWal(STQ* pTq) {
- int32_t vgId = TD_VID(pTq->pVnode);
SStreamMeta* pMeta = pTq->pStreamMeta;
+ int32_t vgId = pMeta->vgId;
int64_t st = taosGetTimestampMs();
tqDebug("vgId:%d continue to check if data in wal are available, scanCounter:%d", vgId, pMeta->scanInfo.scanCounter);
diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c
index dad1211326..6029575e2c 100644
--- a/source/dnode/vnode/src/tq/tqUtil.c
+++ b/source/dnode/vnode/src/tq/tqUtil.c
@@ -263,8 +263,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
} else if (pHead->msgType == TDMT_VND_CREATE_STB || pHead->msgType == TDMT_VND_ALTER_STB) {
PROCESS_EXCLUDED_MSG(SVCreateStbReq, tDecodeSVCreateStbReq)
} else if (pHead->msgType == TDMT_VND_DELETE) {
- fetchVer++;
- continue;
+ PROCESS_EXCLUDED_MSG(SDeleteRes, tDecodeDeleteRes)
}
}
diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c
index a2d45062b9..9bfdd70477 100644
--- a/source/dnode/vnode/src/tqCommon/tqCommon.c
+++ b/source/dnode/vnode/src/tqCommon/tqCommon.c
@@ -142,8 +142,10 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
if (ppHTask == NULL || *ppHTask == NULL) {
- tqError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already",
- vgId, req.taskId);
+ tqError(
+ "vgId:%d failed to acquire fill-history task:0x%x when handling update, may have been dropped already, rel "
+ "stream task:0x%x",
+ vgId, (uint32_t)pTask->hTaskInfo.id.taskId, req.taskId);
CLEAR_RELATED_FILLHISTORY_TASK(pTask);
} else {
tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr);
@@ -612,23 +614,35 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve
int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen) {
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
+ int32_t vgId = pMeta->vgId;
+ STaskId hTaskId = {0};
- int32_t vgId = pMeta->vgId;
tqDebug("vgId:%d receive msg to drop s-task:0x%x", vgId, pReq->taskId);
- SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
- if (pTask != NULL) {
- // drop the related fill-history task firstly
+ streamMetaWLock(pMeta);
+
+ STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
+ SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
+ if ((ppTask != NULL) && ((*ppTask) != NULL)) {
+ streamMetaAcquireOneTask(*ppTask);
+ SStreamTask* pTask = *ppTask;
+
if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
- STaskId* pHTaskId = &pTask->hTaskInfo.id;
- streamMetaUnregisterTask(pMeta, pHTaskId->streamId, pHTaskId->taskId);
- tqDebug("s-task:0x%x vgId:%d drop fill-history task:0x%x firstly", pReq->taskId, vgId,
- (int32_t)pHTaskId->taskId);
+ hTaskId.streamId = pTask->hTaskInfo.id.streamId;
+ hTaskId.taskId = pTask->hTaskInfo.id.taskId;
}
+
+ streamTaskClearHTaskAttr(pTask, pReq->resetRelHalt);
streamMetaReleaseTask(pMeta, pTask);
}
- streamTaskClearHTaskAttr(pTask, pReq->resetRelHalt, true);
+ streamMetaWUnLock(pMeta);
+
+ // drop the related fill-history task firstly
+ if (hTaskId.taskId != 0 && hTaskId.streamId != 0) {
+ streamMetaUnregisterTask(pMeta, hTaskId.streamId, hTaskId.taskId);
+ tqDebug("s-task:0x%x vgId:%d drop rel fill-history task:0x%x firstly", pReq->taskId, vgId, (int32_t)hTaskId.taskId);
+ }
// drop the stream task now
streamMetaUnregisterTask(pMeta, pReq->streamId, pReq->taskId);
@@ -865,7 +879,7 @@ int32_t tqStreamTaskProcessTaskPauseReq(SStreamMeta* pMeta, char* pMsg){
pHistoryTask = streamMetaAcquireTask(pMeta, pTask->hTaskInfo.id.streamId, pTask->hTaskInfo.id.taskId);
if (pHistoryTask == NULL) {
tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%" PRIx64
- ", it may have been dropped already",
+ ", it may have been dropped already",
pMeta->vgId, pTask->hTaskInfo.id.taskId);
streamMetaReleaseTask(pMeta, pTask);
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index 1ef2a451a7..0848fd0076 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -789,25 +789,6 @@ int32_t tsdbCacheDropSTableColumn(STsdb *pTsdb, SArray *uids, int16_t cid, int8_
return code;
}
-static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t ltype) {
- SLastCol *pLastCol = NULL;
-
- char *err = NULL;
- size_t vlen = 0;
- SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
- size_t klen = ROCKS_KEY_LEN;
- char *value = NULL;
- value = rocksdb_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, (char *)key, klen, &vlen, &err);
- if (NULL != err) {
- tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
- rocksdb_free(err);
- }
-
- pLastCol = tsdbCacheDeserialize(value);
-
- return pLastCol;
-}
-
typedef struct {
int idx;
SLastKey key;
@@ -1052,6 +1033,25 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols,
int nCols, int16_t *slotIds);
#ifdef BUILD_NO_CALL
+static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t ltype) {
+ SLastCol *pLastCol = NULL;
+
+ char *err = NULL;
+ size_t vlen = 0;
+ SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
+ size_t klen = ROCKS_KEY_LEN;
+ char *value = NULL;
+ value = rocksdb_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, (char *)key, klen, &vlen, &err);
+ if (NULL != err) {
+ tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
+ rocksdb_free(err);
+ }
+
+ pLastCol = tsdbCacheDeserialize(value);
+
+ return pLastCol;
+}
+
int32_t tsdbCacheGetSlow(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype) {
rocksdb_writebatch_t *wb = NULL;
int32_t code = 0;
@@ -1233,10 +1233,10 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
int16_t *lastSlotIds = taosMemoryMalloc(num_keys * sizeof(int16_t));
int16_t *lastrowColIds = taosMemoryMalloc(num_keys * sizeof(int16_t));
int16_t *lastrowSlotIds = taosMemoryMalloc(num_keys * sizeof(int16_t));
- SArray* lastTmpColArray = NULL;
- SArray* lastTmpIndexArray = NULL;
- SArray* lastrowTmpColArray = NULL;
- SArray* lastrowTmpIndexArray = NULL;
+ SArray *lastTmpColArray = NULL;
+ SArray *lastTmpIndexArray = NULL;
+ SArray *lastrowTmpColArray = NULL;
+ SArray *lastrowTmpIndexArray = NULL;
int lastIndex = 0;
int lastrowIndex = 0;
@@ -1245,7 +1245,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
SIdxKey *idxKey = taosArrayGet(remainCols, i);
slotIds[i] = pr->pSlotIds[idxKey->idx];
if (idxKey->key.ltype == CACHESCAN_RETRIEVE_LAST >> 3) {
- if(NULL == lastTmpIndexArray) {
+ if (NULL == lastTmpIndexArray) {
lastTmpIndexArray = taosArrayInit(num_keys, sizeof(int32_t));
}
taosArrayPush(lastTmpIndexArray, &(i));
@@ -1253,7 +1253,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
lastSlotIds[lastIndex] = pr->pSlotIds[idxKey->idx];
lastIndex++;
} else {
- if(NULL == lastrowTmpIndexArray) {
+ if (NULL == lastrowTmpIndexArray) {
lastrowTmpIndexArray = taosArrayInit(num_keys, sizeof(int32_t));
}
taosArrayPush(lastrowTmpIndexArray, &(i));
@@ -1265,17 +1265,18 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
pTmpColArray = taosArrayInit(lastIndex + lastrowIndex, sizeof(SLastCol));
- if(lastTmpIndexArray != NULL) {
+ if (lastTmpIndexArray != NULL) {
mergeLastCid(uid, pTsdb, &lastTmpColArray, pr, lastColIds, lastIndex, lastSlotIds);
- for(int i = 0; i < taosArrayGetSize(lastTmpColArray); i++) {
- taosArrayInsert(pTmpColArray, *(int32_t*)taosArrayGet(lastTmpIndexArray, i), taosArrayGet(lastTmpColArray, i));
+ for (int i = 0; i < taosArrayGetSize(lastTmpColArray); i++) {
+ taosArrayInsert(pTmpColArray, *(int32_t *)taosArrayGet(lastTmpIndexArray, i), taosArrayGet(lastTmpColArray, i));
}
}
- if(lastrowTmpIndexArray != NULL) {
+ if (lastrowTmpIndexArray != NULL) {
mergeLastRowCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds);
- for(int i = 0; i < taosArrayGetSize(lastrowTmpColArray); i++) {
- taosArrayInsert(pTmpColArray, *(int32_t*)taosArrayGet(lastrowTmpIndexArray, i), taosArrayGet(lastrowTmpColArray, i));
+ for (int i = 0; i < taosArrayGetSize(lastrowTmpColArray); i++) {
+ taosArrayInsert(pTmpColArray, *(int32_t *)taosArrayGet(lastrowTmpIndexArray, i),
+ taosArrayGet(lastrowTmpColArray, i));
}
}
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index 5f4b7b8442..f9f2ae6b21 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -372,8 +372,8 @@ int32_t vnodeProcessSyncMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
int32_t code = syncProcessMsg(pVnode->sync, pMsg);
if (code != 0) {
- vGError("vgId:%d, failed to process sync msg:%p type:%s since %s", pVnode->config.vgId, pMsg,
- TMSG_INFO(pMsg->msgType), terrstr());
+ vGError("vgId:%d, failed to process sync msg:%p type:%s, errno: %s, code:0x%x", pVnode->config.vgId, pMsg,
+ TMSG_INFO(pMsg->msgType), terrstr(), code);
}
return code;
diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index 9ff903cdb9..10220426a3 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -1199,6 +1199,18 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
return code;
}
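+// free a sort source used by the quick-sort path: release the owned param and data block unless they are only referenced, then free the source itself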
+static void freeSSortSource(SSortSource* source) {
+ if (NULL == source) return;
+ if (source->param && !source->onlyRef) {
+ taosMemoryFree(source->param);
+ }
+ if (!source->onlyRef && source->src.pBlock) {
+ blockDataDestroy(source->src.pBlock);
+ source->src.pBlock = NULL;
+ }
+ taosMemoryFree(source);
+}
+
static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) {
int32_t code = 0;
size_t sortBufSize = pHandle->numOfPages * pHandle->pageSize;
@@ -1231,14 +1243,7 @@ static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) {
code = blockDataMerge(pHandle->pDataBlock, pBlock);
if (code != TSDB_CODE_SUCCESS) {
- if (source->param && !source->onlyRef) {
- taosMemoryFree(source->param);
- }
- if (!source->onlyRef && source->src.pBlock) {
- blockDataDestroy(source->src.pBlock);
- source->src.pBlock = NULL;
- }
- taosMemoryFree(source);
+ freeSSortSource(source);
return code;
}
@@ -1248,15 +1253,7 @@ static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) {
int64_t p = taosGetTimestampUs();
code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
if (code != 0) {
- if (source->param && !source->onlyRef) {
- taosMemoryFree(source->param);
- }
- if (!source->onlyRef && source->src.pBlock) {
- blockDataDestroy(source->src.pBlock);
- source->src.pBlock = NULL;
- }
-
- taosMemoryFree(source);
+ freeSSortSource(source);
return code;
}
@@ -1265,16 +1262,13 @@ static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) {
if (pHandle->pqMaxRows > 0) blockDataKeepFirstNRows(pHandle->pDataBlock, pHandle->pqMaxRows);
code = doAddToBuf(pHandle->pDataBlock, pHandle);
if (code != TSDB_CODE_SUCCESS) {
+ freeSSortSource(source);
return code;
}
}
}
- if (source->param && !source->onlyRef) {
- taosMemoryFree(source->param);
- }
-
- taosMemoryFree(source);
+ freeSSortSource(source);
if (pHandle->pDataBlock != NULL && pHandle->pDataBlock->info.rows > 0) {
size_t size = blockDataGetSize(pHandle->pDataBlock);
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index cc8fafa159..0392d486e0 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -4349,6 +4349,26 @@ static int32_t translateEventWindow(STranslateContext* pCxt, SSelectStmt* pSelec
}
static int32_t translateCountWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
+ SCountWindowNode* pCountWin = (SCountWindowNode*)pSelect->pWindow;
+ if (pCountWin->windowCount <= 1) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
+ "Size of Count window must exceed 1.");
+ }
+
+ if (pCountWin->windowSliding <= 0) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
+ "Size of Count window must exceed 0.");
+ }
+
+ if (pCountWin->windowSliding > pCountWin->windowCount) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
+ "sliding value no larger than the count value.");
+ }
+
+ if (pCountWin->windowCount > INT32_MAX) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
+ "Size of Count window must less than 2147483647(INT32_MAX).");
+ }
if (QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
!isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TIMELINE_QUERY,
@@ -8266,29 +8286,7 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm
if (pStmt->pOptions->ignoreExpired != 1) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
"Ignore expired data of Count window must be 1.");
- }
-
- SCountWindowNode* pCountWin = (SCountWindowNode*)pSelect->pWindow;
- if (pCountWin->windowCount <= 1) {
- return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
- "Size of Count window must exceed 1.");
}
-
- if (pCountWin->windowSliding <= 0) {
- return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
- "Size of Count window must exceed 0.");
- }
-
- if (pCountWin->windowSliding > pCountWin->windowCount) {
- return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
- "sliding value no larger than the count value.");
- }
-
- if (pCountWin->windowCount > INT32_MAX) {
- return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
- "Size of Count window must less than 2147483647(INT32_MAX).");
- }
-
}
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index 016b6054f6..d11d39d0e0 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -274,7 +274,7 @@ static bool stbSplNeedSplitWindow(bool streamQuery, SLogicNode* pNode) {
}
}
- if (WINDOW_TYPE_STATE == pWindow->winType) {
+ if (WINDOW_TYPE_STATE == pWindow->winType || WINDOW_TYPE_COUNT == pWindow->winType) {
if (!streamQuery) {
return stbSplHasMultiTbScan(streamQuery, pNode);
} else {
diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c
index 66ec460861..faa90dcbf8 100644
--- a/source/libs/qworker/src/qwMsg.c
+++ b/source/libs/qworker/src/qwMsg.c
@@ -715,6 +715,7 @@ int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SD
uint64_t tId = req.taskId;
int64_t rId = 0;
int32_t eId = -1;
+ pRes->source = req.source;
SQWMsg qwMsg = {.node = node, .msg = req.msg, .msgLen = req.phyLen, .connInfo = pMsg->info};
QW_SCH_TASK_DLOG("processDelete start, node:%p, handle:%p, sql:%s", node, pMsg->info.handle, req.sql);
diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h
index 1aa21e73a3..d129b0024f 100644
--- a/source/libs/scheduler/inc/schInt.h
+++ b/source/libs/scheduler/inc/schInt.h
@@ -304,6 +304,7 @@ typedef struct SSchJob {
SSchResInfo userRes;
char *sql;
SQueryProfileSummary summary;
+ int8_t source;
} SSchJob;
typedef struct SSchTaskCtx {
diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c
index e50ec64d54..48aab63ba3 100644
--- a/source/libs/scheduler/src/schJob.c
+++ b/source/libs/scheduler/src/schJob.c
@@ -746,6 +746,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) {
pJob->chkKillParam = pReq->chkKillParam;
pJob->userRes.execFp = pReq->execFp;
pJob->userRes.cbParam = pReq->cbParam;
+ pJob->source = pReq->source;
if (pReq->pNodeList == NULL || taosArrayGetSize(pReq->pNodeList) <= 0) {
qDebug("QID:0x%" PRIx64 " input exec nodeList is empty", pReq->pDag->queryId);
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index 1c0b31109e..39273ffa50 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -1086,6 +1086,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
req.sqlLen = strlen(pJob->sql);
req.sql = (char *)pJob->sql;
req.msg = pTask->msg;
+ req.source = pJob->source;
msgSize = tSerializeSVDeleteReq(NULL, 0, &req);
msg = taosMemoryCalloc(1, msgSize);
if (NULL == msg) {
diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h
index 87f63b48ed..d0055d5400 100644
--- a/source/libs/stream/inc/streamInt.h
+++ b/source/libs/stream/inc/streamInt.h
@@ -99,6 +99,7 @@ void streamRetryDispatchData(SStreamTask* pTask, int64_t waitDuration);
int32_t streamDispatchStreamBlock(SStreamTask* pTask);
void destroyDispatchMsg(SStreamDispatchReq* pReq, int32_t numOfVgroups);
int32_t getNumOfDispatchBranch(SStreamTask* pTask);
+void clearBufferedDispatchMsg(SStreamTask* pTask);
int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBlock);
SStreamDataBlock* createStreamBlockFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg);
diff --git a/source/libs/stream/inc/streamsm.h b/source/libs/stream/inc/streamsm.h
index 22e1c4497b..47e0ce1b55 100644
--- a/source/libs/stream/inc/streamsm.h
+++ b/source/libs/stream/inc/streamsm.h
@@ -26,21 +26,21 @@ extern "C" {
typedef int32_t (*__state_trans_fn)(SStreamTask*);
typedef int32_t (*__state_trans_succ_fn)(SStreamTask*);
-typedef struct SAttachedEventInfo {
+typedef struct SFutureHandleEventInfo {
ETaskStatus status; // required status that this event can be handled
EStreamTaskEvent event; // the delayed handled event
void* pParam;
- void* pFn;
-} SAttachedEventInfo;
+ __state_trans_user_fn callBackFn;
+} SFutureHandleEventInfo;
typedef struct STaskStateTrans {
- bool autoInvokeEndFn;
- SStreamTaskState state;
- EStreamTaskEvent event;
- SStreamTaskState next;
- __state_trans_fn pAction;
- __state_trans_succ_fn pSuccAction;
- SAttachedEventInfo attachEvent;
+ bool autoInvokeEndFn;
+ SStreamTaskState state;
+ EStreamTaskEvent event;
+ SStreamTaskState next;
+ __state_trans_fn pAction;
+ __state_trans_succ_fn pSuccAction;
+ SFutureHandleEventInfo attachEvent;
} STaskStateTrans;
struct SStreamTaskSM {
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index 78b914c3db..dc790b5b2d 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -315,6 +315,16 @@ int32_t getNumOfDispatchBranch(SStreamTask* pTask) {
: taosArrayGetSize(pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos);
}
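+// destroy the dispatch message buffered in the task, if any, and reset the buffered message type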
+void clearBufferedDispatchMsg(SStreamTask* pTask) {
+ SDispatchMsgInfo* pMsgInfo = &pTask->msgInfo;
+ if (pMsgInfo->pData != NULL) {
+ destroyDispatchMsg(pMsgInfo->pData, getNumOfDispatchBranch(pTask));
+ }
+
+ pMsgInfo->pData = NULL;
+ pMsgInfo->dispatchMsgType = 0;
+}
+
static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pData) {
int32_t code = 0;
int32_t numOfBlocks = taosArrayGetSize(pData->blocks);
@@ -678,8 +688,7 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
// todo deal with only partially success dispatch case
atomic_store_32(&pTask->outputInfo.shuffleDispatcher.waitingRspCnt, 0);
if (terrno == TSDB_CODE_APP_IS_STOPPING) { // in case of this error, do not retry anymore
- destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
- pTask->msgInfo.pData = NULL;
+ clearBufferedDispatchMsg(pTask);
return code;
}
@@ -740,6 +749,8 @@ int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask) {
int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq) {
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
+ ASSERT(dataStrLen > 0);
+
void* buf = taosMemoryCalloc(1, dataStrLen);
if (buf == NULL) return -1;
@@ -936,15 +947,12 @@ void streamClearChkptReadyMsg(SStreamTask* pTask) {
// this message has been sent successfully, let's try next one.
static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId) {
stDebug("s-task:%s destroy dispatch msg:%p", pTask->id.idStr, pTask->msgInfo.pData);
- destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
-
bool delayDispatch = (pTask->msgInfo.dispatchMsgType == STREAM_INPUT__CHECKPOINT_TRIGGER);
if (delayDispatch) {
pTask->chkInfo.dispatchCheckpointTrigger = true;
}
- pTask->msgInfo.pData = NULL;
- pTask->msgInfo.dispatchMsgType = 0;
+ clearBufferedDispatchMsg(pTask);
int64_t el = taosGetTimestampMs() - pTask->msgInfo.startTs;
@@ -1084,7 +1092,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
} else { // this message has been sent successfully, let's try next one.
pTask->msgInfo.retryCount = 0;
- // transtate msg has been sent to downstream successfully. let's transfer the fill-history task state
+ // trans-state msg has been sent to downstream successfully. let's transfer the fill-history task state
if (pTask->msgInfo.dispatchMsgType == STREAM_INPUT__TRANS_STATE) {
stDebug("s-task:%s dispatch transtate msgId:%d to downstream successfully, start to transfer state", id, msgId);
ASSERT(pTask->info.fillHistory == 1);
@@ -1093,6 +1101,8 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens
}
+ clearBufferedDispatchMsg(pTask);
+
// now ready for next data output
atomic_store_8(&pTask->outputq.status, TASK_OUTPUT_STATUS__NORMAL);
} else {
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index bac6022834..27cd98aac6 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -410,6 +410,12 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
return TSDB_CODE_SUCCESS;
}
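+// invoked after the related stream task has been halted: reopen its upstream input queues and request a checkpoint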
+static int32_t haltCallback(SStreamTask* pTask, void* param) {
+ streamTaskOpenAllUpstreamInput(pTask);
+ streamTaskSendCheckpointReq(pTask);
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
int32_t code = TSDB_CODE_SUCCESS;
SStreamMeta* pMeta = pTask->pMeta;
@@ -419,11 +425,12 @@ int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
int32_t level = pTask->info.taskLevel;
if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) { // do transfer task operator states.
code = streamDoTransferStateToStreamTask(pTask);
- } else { // no state transfer for sink tasks, and drop fill-history task, followed by opening inputQ of sink task.
+ } else {
+ // no state transfer for sink tasks, and drop fill-history task, followed by opening inputQ of sink task.
SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
if (pStreamTask != NULL) {
// halt the related stream sink task
- code = streamTaskHandleEvent(pStreamTask->status.pSM, TASK_EVENT_HALT);
+ code = streamTaskHandleEventAsync(pStreamTask->status.pSM, TASK_EVENT_HALT, haltCallback, NULL);
if (code != TSDB_CODE_SUCCESS) {
stError("s-task:%s halt stream task:%s failed, code:%s not transfer state to stream task", pTask->id.idStr,
pStreamTask->id.idStr, tstrerror(code));
@@ -432,9 +439,6 @@ int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
} else {
stDebug("s-task:%s halt by related fill-history task:%s", pStreamTask->id.idStr, pTask->id.idStr);
}
-
- streamTaskOpenAllUpstreamInput(pStreamTask);
- streamTaskSendCheckpointReq(pStreamTask);
streamMetaReleaseTask(pMeta, pStreamTask);
}
}
diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c
index a09b940a19..a072ee1f6f 100644
--- a/source/libs/stream/src/streamMeta.c
+++ b/source/libs/stream/src/streamMeta.c
@@ -669,6 +669,13 @@ static void doRemoveIdFromList(SStreamMeta* pMeta, int32_t num, SStreamTaskId* i
}
}
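+// callback for the DROPPING event: source tasks reply to the checkpoint-source request before being removed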
+static int32_t streamTaskSendTransSuccessMsg(SStreamTask* pTask, void* param) {
+ if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
+ streamTaskSendCheckpointSourceRsp(pTask);
+ }
+ return 0;
+}
+
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
SStreamTask* pTask = NULL;
@@ -687,7 +694,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
}
// handle the dropping event
- streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_DROPPING);
+ streamTaskHandleEventAsync(pTask->status.pSM, TASK_EVENT_DROPPING, streamTaskSendTransSuccessMsg, NULL);
} else {
stDebug("vgId:%d failed to find the task:0x%x, it may be dropped already", pMeta->vgId, taskId);
streamMetaWUnLock(pMeta);
diff --git a/source/libs/stream/src/streamSessionState.c b/source/libs/stream/src/streamSessionState.c
index 3d0241df75..723f04c499 100644
--- a/source/libs/stream/src/streamSessionState.c
+++ b/source/libs/stream/src/streamSessionState.c
@@ -156,6 +156,7 @@ int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey,
(*pVal) = pPos;
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
pPos->beUsed = true;
+ pPos->beFlushed = false;
*pKey = *pDestWinKey;
goto _end;
}
@@ -167,6 +168,7 @@ int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey,
(*pVal) = pPos;
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
pPos->beUsed = true;
+ pPos->beFlushed = false;
*pKey = *pDestWinKey;
goto _end;
}
@@ -380,6 +382,14 @@ static SStreamStateCur* seekKeyCurrentPrev_buff(SStreamFileState* pFileState, co
(*pWins) = pWinStates;
}
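+  // if the search above found nothing, still match the first buffered window when its start key equals the requested one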
+ if (size > 0 && index == -1) {
+ SRowBuffPos* pPos = taosArrayGetP(pWinStates, 0);
+ SSessionKey* pWin = (SSessionKey*)pPos->pKey;
+ if (pWinKey->win.skey == pWin->win.skey) {
+ index = 0;
+ }
+ }
+
if (index >= 0) {
pCur = createSessionStateCursor(pFileState);
pCur->buffIndex = index;
@@ -387,6 +397,7 @@ static SStreamStateCur* seekKeyCurrentPrev_buff(SStreamFileState* pFileState, co
*pIndex = index;
}
}
+
return pCur;
}
@@ -666,6 +677,7 @@ int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, ch
(*pVal) = pPos;
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
pPos->beUsed = true;
+ pPos->beFlushed = false;
*key = *pDestWinKey;
goto _end;
}
@@ -679,6 +691,7 @@ int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, ch
(*pVal) = pPos;
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
pPos->beUsed = true;
+ pPos->beFlushed = false;
*key = *pDestWinKey;
goto _end;
}
@@ -771,6 +784,7 @@ int32_t getCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, C
(*pVal) = pPos;
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
pPos->beUsed = true;
+ pPos->beFlushed = false;
*pWinKey = *pDestWinKey;
goto _end;
}
@@ -799,6 +813,7 @@ int32_t getCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, C
(*pVal) = pPos;
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
pPos->beUsed = true;
+ pPos->beFlushed = false;
*pWinKey = *pDestWinKey;
goto _end;
}
diff --git a/source/libs/stream/src/streamStart.c b/source/libs/stream/src/streamStart.c
index 6112a208c6..cb340ade32 100644
--- a/source/libs/stream/src/streamStart.c
+++ b/source/libs/stream/src/streamStart.c
@@ -385,7 +385,7 @@ int32_t streamTaskOnScanhistoryTaskReady(SStreamTask* pTask) {
void doProcessDownstreamReadyRsp(SStreamTask* pTask) {
EStreamTaskEvent event = (pTask->info.fillHistory == 0) ? TASK_EVENT_INIT : TASK_EVENT_INIT_SCANHIST;
- streamTaskOnHandleEventSuccess(pTask->status.pSM, event);
+ streamTaskOnHandleEventSuccess(pTask->status.pSM, event, NULL, NULL);
int64_t initTs = pTask->execInfo.init;
int64_t startTs = pTask->execInfo.start;
diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c
index 9639921c77..45a9a68d3d 100644
--- a/source/libs/stream/src/streamTask.c
+++ b/source/libs/stream/src/streamTask.c
@@ -39,7 +39,7 @@ static int32_t doUpdateTaskEpset(SStreamTask* pTask, int32_t nodeId, SEpSet* pEp
stDebug("s-task:0x%x (vgId:%d) self node epset is updated %s", pTask->id.taskId, nodeId, buf);
}
- // check for the dispath info and the upstream task info
+ // check for the dispatch info and the upstream task info
int32_t level = pTask->info.taskLevel;
if (level == TASK_LEVEL__SOURCE) {
streamTaskUpdateDownstreamInfo(pTask, nodeId, pEpSet);
@@ -412,9 +412,7 @@ void tFreeStreamTask(SStreamTask* pTask) {
pTask->pReadyMsgList = taosArrayDestroy(pTask->pReadyMsgList);
if (pTask->msgInfo.pData != NULL) {
- destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
- pTask->msgInfo.pData = NULL;
- pTask->msgInfo.dispatchMsgType = 0;
+ clearBufferedDispatchMsg(pTask);
}
if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
@@ -624,6 +622,7 @@ void streamTaskSetFixedDownstreamInfo(SStreamTask* pTask, const SStreamTask* pDo
void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet) {
char buf[512] = {0};
EPSET_TO_STR(pEpSet, buf);
+ int32_t id = pTask->id.taskId;
int8_t type = pTask->outputInfo.type;
if (type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
@@ -635,8 +634,8 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE
if (pVgInfo->vgId == nodeId) {
epsetAssign(&pVgInfo->epSet, pEpSet);
- stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpset:%s", pTask->id.taskId,
- pVgInfo->taskId, nodeId, buf);
+ stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpset:%s", id, pVgInfo->taskId, nodeId,
+ buf);
break;
}
}
@@ -644,11 +643,9 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE
STaskDispatcherFixed* pDispatcher = &pTask->outputInfo.fixedDispatcher;
if (pDispatcher->nodeId == nodeId) {
epsetAssign(&pDispatcher->epSet, pEpSet);
- stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpSet:%s", pTask->id.taskId,
- pDispatcher->taskId, nodeId, buf);
+ stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpset:%s", id, pDispatcher->taskId, nodeId,
+ buf);
}
- } else {
- // do nothing
}
}
@@ -766,21 +763,13 @@ int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask) {
return status;
}
-int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt, bool metaLock) {
- if (pTask == NULL) {
- return TSDB_CODE_SUCCESS;
- }
-
+int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt) {
SStreamMeta* pMeta = pTask->pMeta;
STaskId sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId};
if (pTask->info.fillHistory == 0) {
return TSDB_CODE_SUCCESS;
}
- if (metaLock) {
- streamMetaWLock(pMeta);
- }
-
SStreamTask** ppStreamTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &sTaskId, sizeof(sTaskId));
if (ppStreamTask != NULL) {
stDebug("s-task:%s clear the related stream task:0x%x attr to fill-history task", pTask->id.idStr,
@@ -798,10 +787,6 @@ int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt, bool
taosThreadMutexUnlock(&(*ppStreamTask)->lock);
}
- if (metaLock) {
- streamMetaWUnLock(pMeta);
- }
-
return TSDB_CODE_SUCCESS;
}
@@ -869,8 +854,8 @@ void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc)
pDst->chkpointTransId = pSrc->chkpointTransId;
}
-void streamTaskPause(SStreamMeta* pMeta, SStreamTask* pTask) {
- streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_PAUSE);
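+// runs after the PAUSE event has been handled: account for the newly paused task and set its pause flag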
+static int32_t taskPauseCallback(SStreamTask* pTask, void* param) {
+ SStreamMeta* pMeta = pTask->pMeta;
int32_t num = atomic_add_fetch_32(&pMeta->numOfPausedTasks, 1);
stInfo("vgId:%d s-task:%s pause stream task. pause task num:%d", pMeta->vgId, pTask->id.idStr, num);
@@ -882,24 +867,24 @@ void streamTaskPause(SStreamMeta* pMeta, SStreamTask* pTask) {
}
stDebug("vgId:%d s-task:%s set pause flag and pause task", pMeta->vgId, pTask->id.idStr);
+ return TSDB_CODE_SUCCESS;
+}
+
+void streamTaskPause(SStreamMeta* pMeta, SStreamTask* pTask) {
+ streamTaskHandleEventAsync(pTask->status.pSM, TASK_EVENT_PAUSE, taskPauseCallback, NULL);
}
void streamTaskResume(SStreamTask* pTask) {
SStreamTaskState prevState = *streamTaskGetStatus(pTask);
- SStreamMeta* pMeta = pTask->pMeta;
- if (prevState.state == TASK_STATUS__PAUSE || prevState.state == TASK_STATUS__HALT) {
- streamTaskRestoreStatus(pTask);
-
- char* pNew = streamTaskGetStatus(pTask)->name;
- if (prevState.state == TASK_STATUS__PAUSE) {
- int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1);
- stInfo("s-task:%s status:%s resume from %s, paused task(s):%d", pTask->id.idStr, pNew, prevState.name, num);
- } else {
- stInfo("s-task:%s status:%s resume from %s", pTask->id.idStr, pNew, prevState.name);
- }
+ SStreamMeta* pMeta = pTask->pMeta;
+ int32_t code = streamTaskRestoreStatus(pTask);
+ if (code == TSDB_CODE_SUCCESS) {
+ char* pNew = streamTaskGetStatus(pTask)->name;
+ int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1);
+ stInfo("s-task:%s status:%s resume from %s, paused task(s):%d", pTask->id.idStr, pNew, prevState.name, num);
} else {
- stDebug("s-task:%s status:%s not in pause/halt status, no need to resume", pTask->id.idStr, prevState.name);
+ stInfo("s-task:%s status:%s no need to resume, paused task(s):%d", pTask->id.idStr, prevState.name, pMeta->numOfPausedTasks);
}
}
diff --git a/source/libs/stream/src/streamTaskSm.c b/source/libs/stream/src/streamTaskSm.c
index 83e71c42bc..6aa215586a 100644
--- a/source/libs/stream/src/streamTaskSm.c
+++ b/source/libs/stream/src/streamTaskSm.c
@@ -59,20 +59,23 @@ static int32_t streamTaskInitStatus(SStreamTask* pTask);
static int32_t streamTaskKeepCurrentVerInWal(SStreamTask* pTask);
static int32_t initStateTransferTable();
static void doInitStateTransferTable(void);
-static int32_t streamTaskSendTransSuccessMsg(SStreamTask* pTask);
static STaskStateTrans createStateTransform(ETaskStatus current, ETaskStatus next, EStreamTaskEvent event,
__state_trans_fn fn, __state_trans_succ_fn succFn,
- SAttachedEventInfo* pEventInfo, bool autoInvoke);
+ SFutureHandleEventInfo* pEventInfo);
static int32_t dummyFn(SStreamTask* UNUSED_PARAM(p)) { return TSDB_CODE_SUCCESS; }
-static int32_t attachEvent(SStreamTask* pTask, SAttachedEventInfo* pEvtInfo) {
+static int32_t attachWaitedEvent(SStreamTask* pTask, SFutureHandleEventInfo* pEvtInfo) {
char* p = streamTaskGetStatus(pTask)->name;
stDebug("s-task:%s status:%s attach event:%s required status:%s, since not allowed to handle it", pTask->id.idStr, p,
GET_EVT_NAME(pEvtInfo->event), StreamTaskStatusList[pEvtInfo->status].name);
- taosArrayPush(pTask->status.pSM->pWaitingEventList, pEvtInfo);
+
+ SArray* pList = pTask->status.pSM->pWaitingEventList;
+ taosArrayPush(pList, pEvtInfo);
+
+ stDebug("s-task:%s add into waiting list, total waiting events:%d", pTask->id.idStr, (int32_t)taosArrayGetSize(pList));
return 0;
}
@@ -85,18 +88,6 @@ int32_t streamTaskInitStatus(SStreamTask* pTask) {
return 0;
}
-static int32_t streamTaskDoCheckpoint(SStreamTask* pTask) {
- stDebug("s-task:%s start to do checkpoint", pTask->id.idStr);
- return 0;
-}
-
-int32_t streamTaskSendTransSuccessMsg(SStreamTask* pTask) {
- if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
- streamTaskSendCheckpointSourceRsp(pTask);
- }
- return 0;
-}
-
int32_t streamTaskKeepCurrentVerInWal(SStreamTask* pTask) {
if (!HAS_RELATED_FILLHISTORY_TASK(pTask)) {
stError("s-task:%s no related fill-history task, since it may have been dropped already", pTask->id.idStr);
@@ -170,9 +161,11 @@ static int32_t doHandleWaitingEvent(SStreamTaskSM* pSM, const char* pEventName,
stDebug("s-task:%s handle event:%s completed, elapsed time:%" PRId64 "ms state:%s -> %s", pTask->id.idStr,
pEventName, el, pSM->prev.state.name, pSM->current.name);
- SAttachedEventInfo* pEvtInfo = taosArrayGet(pSM->pWaitingEventList, 0);
+ ASSERT(taosArrayGetSize(pSM->pWaitingEventList) == 1);
- // OK, let's handle the attached event, since the task has reached the required status now
+ SFutureHandleEventInfo* pEvtInfo = taosArrayGet(pSM->pWaitingEventList, 0);
+
+ // OK, let's handle the waiting event, since the task has reached the required status now
if (pSM->current.state == pEvtInfo->status) {
stDebug("s-task:%s handle the event:%s in waiting list, state:%s", pTask->id.idStr,
GET_EVT_NAME(pEvtInfo->event), pSM->current.name);
@@ -189,7 +182,7 @@ static int32_t doHandleWaitingEvent(SStreamTaskSM* pSM, const char* pEventName,
code = pNextTrans->pAction(pSM->pTask);
if (pNextTrans->autoInvokeEndFn) {
- return streamTaskOnHandleEventSuccess(pSM, pNextTrans->event);
+ return streamTaskOnHandleEventSuccess(pSM, pNextTrans->event, pEvtInfo->callBackFn, pEvtInfo->pParam);
} else {
return code;
}
@@ -203,30 +196,61 @@ static int32_t doHandleWaitingEvent(SStreamTaskSM* pSM, const char* pEventName,
return code;
}
-void streamTaskRestoreStatus(SStreamTask* pTask) {
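+// remove a pending event of the given type from the waiting list when it has not been handled yet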
+static int32_t removeEventInWaitingList(SStreamTask* pTask, EStreamTaskEvent event) {
SStreamTaskSM* pSM = pTask->status.pSM;
+ bool removed = false;
taosThreadMutexLock(&pTask->lock);
- ASSERT(pSM->pActiveTrans == NULL);
- ASSERT(pSM->current.state == TASK_STATUS__PAUSE || pSM->current.state == TASK_STATUS__HALT);
+ int32_t num = taosArrayGetSize(pSM->pWaitingEventList);
+ for (int32_t i = 0; i < num; ++i) {
+ SFutureHandleEventInfo* pInfo = taosArrayGet(pSM->pWaitingEventList, i);
+ if (pInfo->event == event) {
+ taosArrayRemove(pSM->pWaitingEventList, i);
+ stDebug("s-task:%s pause event in waiting list not be handled yet, remove it from waiting list, remaining:%d",
+ pTask->id.idStr, pInfo->event);
+ removed = true;
+ break;
+ }
+ }
- SStreamTaskState state = pSM->current;
- pSM->current = pSM->prev.state;
-
- pSM->prev.state = state;
- pSM->prev.evt = 0;
-
- pSM->startTs = taosGetTimestampMs();
-
- if (taosArrayGetSize(pSM->pWaitingEventList) > 0) {
- stDebug("s-task:%s restore status, %s -> %s, and then handle waiting event", pTask->id.idStr, pSM->prev.state.name, pSM->current.name);
- doHandleWaitingEvent(pSM, "restore-pause/halt", pTask);
- } else {
- stDebug("s-task:%s restore status, %s -> %s", pTask->id.idStr, pSM->prev.state.name, pSM->current.name);
+ if (!removed) {
+ stDebug("s-task:%s failed to remove event:%s in waiting list", pTask->id.idStr, StreamTaskEventList[event].name);
}
taosThreadMutexUnlock(&pTask->lock);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t streamTaskRestoreStatus(SStreamTask* pTask) {
+ SStreamTaskSM* pSM = pTask->status.pSM;
+ int32_t code = 0;
+
+ taosThreadMutexLock(&pTask->lock);
+
+ if (pSM->current.state == TASK_STATUS__PAUSE && pSM->pActiveTrans == NULL) {
+ SStreamTaskState state = pSM->current;
+ pSM->current = pSM->prev.state;
+
+ pSM->prev.state = state;
+ pSM->prev.evt = 0;
+
+ pSM->startTs = taosGetTimestampMs();
+
+ if (taosArrayGetSize(pSM->pWaitingEventList) > 0) {
+ stDebug("s-task:%s restore status, %s -> %s, and then handle waiting event", pTask->id.idStr,
+ pSM->prev.state.name, pSM->current.name);
+ doHandleWaitingEvent(pSM, "restore-pause/halt", pTask);
+ } else {
+ stDebug("s-task:%s restore status, %s -> %s", pTask->id.idStr, pSM->prev.state.name, pSM->current.name);
+ }
+ } else {
+ removeEventInWaitingList(pTask, TASK_EVENT_PAUSE);
+ code = -1; // failed to restore the status
+ }
+
+ taosThreadMutexUnlock(&pTask->lock);
+ return code;
}
SStreamTaskSM* streamCreateStateMachine(SStreamTask* pTask) {
@@ -242,7 +266,7 @@ SStreamTaskSM* streamCreateStateMachine(SStreamTask* pTask) {
}
pSM->pTask = pTask;
- pSM->pWaitingEventList = taosArrayInit(4, sizeof(SAttachedEventInfo));
+ pSM->pWaitingEventList = taosArrayInit(4, sizeof(SFutureHandleEventInfo));
if (pSM->pWaitingEventList == NULL) {
taosMemoryFree(pSM);
@@ -273,7 +297,7 @@ static int32_t doHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event, STaskSt
const char* id = pTask->id.idStr;
if (pTrans->attachEvent.event != 0) {
- attachEvent(pTask, &pTrans->attachEvent);
+ attachWaitedEvent(pTask, &pTrans->attachEvent);
taosThreadMutexUnlock(&pTask->lock);
while (1) {
@@ -303,7 +327,32 @@ static int32_t doHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event, STaskSt
// todo handle error code;
if (pTrans->autoInvokeEndFn) {
- streamTaskOnHandleEventSuccess(pSM, event);
+ streamTaskOnHandleEventSuccess(pSM, event, NULL, NULL);
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
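+// same as doHandleEvent, but carries a user-specified callback that is invoked, or parked together with a deferred event, once the transition completes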
+static int32_t doHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, STaskStateTrans* pTrans, __state_trans_user_fn callbackFn, void* param) {
+ SStreamTask* pTask = pSM->pTask;
+ if (pTrans->attachEvent.event != 0) {
+ SFutureHandleEventInfo info = pTrans->attachEvent;
+ info.pParam = param;
+ info.callBackFn = callbackFn;
+
+ attachWaitedEvent(pTask, &info);
+ taosThreadMutexUnlock(&pTask->lock);
+ } else { // override current active trans
+ pSM->pActiveTrans = pTrans;
+ pSM->startTs = taosGetTimestampMs();
+ taosThreadMutexUnlock(&pTask->lock);
+
+ int32_t code = pTrans->pAction(pTask);
+ // todo handle error code;
+
+ if (pTrans->autoInvokeEndFn) {
+ streamTaskOnHandleEventSuccess(pSM, event, callbackFn, param);
}
}
@@ -349,6 +398,45 @@ int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event) {
return code;
}
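+// async variant of streamTaskHandleEvent: instead of waiting, register callbackFn to run after the event is finally handled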
+int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, __state_trans_user_fn callbackFn, void* param) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SStreamTask* pTask = pSM->pTask;
+ STaskStateTrans* pTrans = NULL;
+
+ while (1) {
+ taosThreadMutexLock(&pTask->lock);
+
+ if (pSM->pActiveTrans != NULL && pSM->pActiveTrans->autoInvokeEndFn) {
+ EStreamTaskEvent evt = pSM->pActiveTrans->event;
+ taosThreadMutexUnlock(&pTask->lock);
+
+ stDebug("s-task:%s status:%s handling event:%s by some other thread, wait for 100ms and check if completed",
+ pTask->id.idStr, pSM->current.name, GET_EVT_NAME(evt));
+ taosMsleep(100);
+ } else {
+ // no active event trans exists, handle this event directly
+ pTrans = streamTaskFindTransform(pSM->current.state, event);
+ if (pTrans == NULL) {
+ stDebug("s-task:%s failed to handle event:%s, status:%s", pTask->id.idStr, GET_EVT_NAME(event), pSM->current.name);
+ taosThreadMutexUnlock(&pTask->lock);
+ return TSDB_CODE_STREAM_INVALID_STATETRANS;
+ }
+
+ if (pSM->pActiveTrans != NULL) {
+      // an active transition that does not auto-invoke its end fn is in progress; abandon it and handle this event now
+ stDebug("s-task:%s event:%s handle procedure quit, status %s -> %s failed, handle event %s now",
+ pTask->id.idStr, GET_EVT_NAME(pSM->pActiveTrans->event), pSM->current.name,
+ pSM->pActiveTrans->next.name, GET_EVT_NAME(event));
+ }
+
+ code = doHandleEventAsync(pSM, event, pTrans, callbackFn, param);
+ break;
+ }
+ }
+
+ return code;
+}
+
static void keepPrevInfo(SStreamTaskSM* pSM) {
STaskStateTrans* pTrans = pSM->pActiveTrans;
@@ -356,8 +444,9 @@ static void keepPrevInfo(SStreamTaskSM* pSM) {
pSM->prev.evt = pTrans->event;
}
-int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event) {
+int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event, __state_trans_user_fn callbackFn, void* param) {
SStreamTask* pTask = pSM->pTask;
+ const char* id = pTask->id.idStr;
// do update the task status
taosThreadMutexLock(&pTask->lock);
@@ -369,16 +458,16 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
s == TASK_STATUS__UNINIT || s == TASK_STATUS__READY);
// the pSM->prev.evt may be 0, so print string is not appropriate.
- stDebug("s-task:%s event:%s handled failed, current status:%s, trigger event:%s", pTask->id.idStr,
- GET_EVT_NAME(event), pSM->current.name, GET_EVT_NAME(pSM->prev.evt));
+ stDebug("s-task:%s event:%s handled failed, current status:%s, trigger event:%s", id, GET_EVT_NAME(event),
+ pSM->current.name, GET_EVT_NAME(pSM->prev.evt));
taosThreadMutexUnlock(&pTask->lock);
return TSDB_CODE_STREAM_INVALID_STATETRANS;
}
if (pTrans->event != event) {
- stWarn("s-task:%s handle event:%s failed, current status:%s, active trans evt:%s", pTask->id.idStr,
- GET_EVT_NAME(event), pSM->current.name, GET_EVT_NAME(pTrans->event));
+ stWarn("s-task:%s handle event:%s failed, current status:%s, active trans evt:%s", id, GET_EVT_NAME(event),
+ pSM->current.name, GET_EVT_NAME(pTrans->event));
taosThreadMutexUnlock(&pTask->lock);
return TSDB_CODE_STREAM_INVALID_STATETRANS;
}
@@ -388,16 +477,31 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
pSM->current = pTrans->next;
pSM->pActiveTrans = NULL;
+ // todo remove it
// on success callback, add into lock if necessary, or maybe we should add an option for this?
pTrans->pSuccAction(pTask);
+ taosThreadMutexUnlock(&pTask->lock);
+
+ // todo: add parameter to control lock
+ // after handling the callback function assigned by invoker, go on handling the waiting tasks
+ if (callbackFn != NULL) {
+ stDebug("s-task:%s start to handle user-specified callback fn for event:%s", id, GET_EVT_NAME(pTrans->event));
+ callbackFn(pSM->pTask, param);
+
+ stDebug("s-task:%s handle user-specified callback fn for event:%s completed", id, GET_EVT_NAME(pTrans->event));
+ }
+
+ taosThreadMutexLock(&pTask->lock);
+
+  // handle events remaining in the waiting list
if (taosArrayGetSize(pSM->pWaitingEventList) > 0) {
doHandleWaitingEvent(pSM, GET_EVT_NAME(pTrans->event), pTask);
} else {
taosThreadMutexUnlock(&pTask->lock);
int64_t el = (taosGetTimestampMs() - pSM->startTs);
- stDebug("s-task:%s handle event:%s completed, elapsed time:%" PRId64 "ms state:%s -> %s", pTask->id.idStr,
+ stDebug("s-task:%s handle event:%s completed, elapsed time:%" PRId64 "ms state:%s -> %s", id,
GET_EVT_NAME(pTrans->event), el, pSM->prev.state.name, pSM->current.name);
}
@@ -453,7 +557,7 @@ void streamTaskSetStatusReady(SStreamTask* pTask) {
}
STaskStateTrans createStateTransform(ETaskStatus current, ETaskStatus next, EStreamTaskEvent event, __state_trans_fn fn,
- __state_trans_succ_fn succFn, SAttachedEventInfo* pEventInfo, bool autoInvoke) {
+ __state_trans_succ_fn succFn, SFutureHandleEventInfo* pEventInfo) {
STaskStateTrans trans = {0};
trans.state = StreamTaskStatusList[current];
trans.next = StreamTaskStatusList[next];
@@ -468,7 +572,7 @@ STaskStateTrans createStateTransform(ETaskStatus current, ETaskStatus next, EStr
trans.pAction = (fn != NULL) ? fn : dummyFn;
trans.pSuccAction = (succFn != NULL) ? succFn : dummyFn;
- trans.autoInvokeEndFn = autoInvoke;
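+  // transitions without an explicit action fn complete immediately, so the end fn is invoked automatically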
+ trans.autoInvokeEndFn = (fn == NULL);
return trans;
}
@@ -482,93 +586,93 @@ void doInitStateTransferTable(void) {
streamTaskSMTrans = taosArrayInit(8, sizeof(STaskStateTrans));
// initialization event handle
- STaskStateTrans trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__READY, TASK_EVENT_INIT, streamTaskInitStatus, streamTaskOnNormalTaskReady, false, false);
+ STaskStateTrans trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__READY, TASK_EVENT_INIT, streamTaskInitStatus, streamTaskOnNormalTaskReady, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__SCAN_HISTORY, TASK_EVENT_INIT_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, false, false);
+ trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__SCAN_HISTORY, TASK_EVENT_INIT_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
// scan-history related event
- trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
// halt stream task, from other task status
- trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);
+ trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);
+ trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- SAttachedEventInfo info = {.status = TASK_STATUS__READY, .event = TASK_EVENT_HALT};
+ SFutureHandleEventInfo info = {.status = TASK_STATUS__READY, .event = TASK_EVENT_HALT};
- trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true);
+ trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);
+ trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
// checkpoint related event
- trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, streamTaskDoCheckpoint, NULL, true);
+ trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, streamTaskDoCheckpoint, NULL, true);
+ trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__READY, TASK_EVENT_CHECKPOINT_DONE, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__READY, TASK_EVENT_CHECKPOINT_DONE, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
// pause & resume related event handle
- trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- info = (SAttachedEventInfo){.status = TASK_STATUS__READY, .event = TASK_EVENT_PAUSE};
- trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true);
+ info = (SFutureHandleEventInfo){.status = TASK_STATUS__READY, .event = TASK_EVENT_PAUSE};
+ trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true);
+ trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__STOP, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__STOP, TASK_EVENT_PAUSE, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__DROPPING, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__DROPPING, TASK_EVENT_PAUSE, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
// resume is completed by restore status of state-machine
// stop related event
- trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
// dropping related event
- trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
- trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, streamTaskSendTransSuccessMsg, NULL, NULL, true);
+ trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
taosArrayPush(streamTaskSMTrans, &trans);
}
//clang-format on
\ No newline at end of file
diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h
index 5b18d56d70..c010e31320 100644
--- a/source/libs/transport/inc/transComm.h
+++ b/source/libs/transport/inc/transComm.h
@@ -256,21 +256,21 @@ void transAsyncPoolDestroy(SAsyncPool* pool);
int transAsyncSend(SAsyncPool* pool, queue* mq);
bool transAsyncPoolIsEmpty(SAsyncPool* pool);
-#define TRANS_DESTROY_ASYNC_POOL_MSG(pool, msgType, freeFunc) \
- do { \
- for (int i = 0; i < pool->nAsync; i++) { \
- uv_async_t* async = &(pool->asyncs[i]); \
- SAsyncItem* item = async->data; \
- while (!QUEUE_IS_EMPTY(&item->qmsg)) { \
- tTrace("destroy msg in async pool "); \
- queue* h = QUEUE_HEAD(&item->qmsg); \
- QUEUE_REMOVE(h); \
- msgType* msg = QUEUE_DATA(h, msgType, q); \
- if (msg != NULL) { \
- freeFunc(msg); \
- } \
- } \
- } \
+#define TRANS_DESTROY_ASYNC_POOL_MSG(pool, msgType, freeFunc, param) \
+ do { \
+ for (int i = 0; i < pool->nAsync; i++) { \
+ uv_async_t* async = &(pool->asyncs[i]); \
+ SAsyncItem* item = async->data; \
+ while (!QUEUE_IS_EMPTY(&item->qmsg)) { \
+ tTrace("destroy msg in async pool "); \
+ queue* h = QUEUE_HEAD(&item->qmsg); \
+ QUEUE_REMOVE(h); \
+ msgType* msg = QUEUE_DATA(h, msgType, q); \
+ if (msg != NULL) { \
+ freeFunc(msg, param); \
+ } \
+ } \
+ } \
} while (0)
#define ASYNC_CHECK_HANDLE(exh1, id) \
diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c
index 6de10cbb9e..c4ca39c323 100644
--- a/source/libs/transport/src/thttp.c
+++ b/source/libs/transport/src/thttp.c
@@ -191,6 +191,15 @@ static void httpDestroyMsg(SHttpMsg* msg) {
taosMemoryFree(msg->cont);
taosMemoryFree(msg);
}
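+// adapter matching the (msg, param) signature now expected by TRANS_DESTROY_ASYNC_POOL_MSG; param is unused for http messages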
+static void httpDestroyMsgWrapper(void* cont, void* param) {
+  httpDestroyMsg((SHttpMsg*)cont);
+}
static void httpMayDiscardMsg(SHttpModule* http, SAsyncItem* item) {
SHttpMsg *msg = NULL, *quitMsg = NULL;
@@ -554,7 +563,7 @@ void transHttpEnvDestroy() {
httpSendQuit();
taosThreadJoin(load->thread, NULL);
- TRANS_DESTROY_ASYNC_POOL_MSG(load->asyncPool, SHttpMsg, httpDestroyMsg);
+ TRANS_DESTROY_ASYNC_POOL_MSG(load->asyncPool, SHttpMsg, httpDestroyMsgWrapper, NULL);
transAsyncPoolDestroy(load->asyncPool);
uv_loop_close(load->loop);
taosMemoryFree(load->loop);
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index e937d2e65e..6ae72eac14 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -219,6 +219,8 @@ static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrd* pThrd) = {cliHandleReq,
/// NULL,cliHandleUpdate};
static FORCE_INLINE void destroyCmsg(void* cmsg);
+
+static FORCE_INLINE void destroyCmsgWrapper(void* arg, void* param);
static FORCE_INLINE void destroyCmsgAndAhandle(void* cmsg);
static FORCE_INLINE int cliRBChoseIdx(STrans* pTransInst);
static FORCE_INLINE void transDestroyConnCtx(STransConnCtx* ctx);
@@ -1963,7 +1965,17 @@ static FORCE_INLINE void destroyCmsg(void* arg) {
transFreeMsg(pMsg->msg.pCont);
taosMemoryFree(pMsg);
}
-
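+// adapter for TRANS_DESTROY_ASYNC_POOL_MSG: param carries the owning SCliThrd so the
+// thread's ahandle destructor can run before the message itself is freed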
+static FORCE_INLINE void destroyCmsgWrapper(void* arg, void* param) {
+ SCliMsg* pMsg = arg;
+ if (pMsg == NULL) {
+ return;
+ }
+ if (param != NULL) {
+ SCliThrd* pThrd = param;
+ if (pThrd->destroyAhandleFp) (*pThrd->destroyAhandleFp)(pMsg->msg.info.ahandle);
+ }
+ destroyCmsg(pMsg);
+}
static FORCE_INLINE void destroyCmsgAndAhandle(void* param) {
if (param == NULL) return;
@@ -2057,7 +2069,7 @@ static void destroyThrdObj(SCliThrd* pThrd) {
taosThreadJoin(pThrd->thread, NULL);
CLI_RELEASE_UV(pThrd->loop);
taosThreadMutexDestroy(&pThrd->msgMtx);
- TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SCliMsg, destroyCmsg);
+ TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SCliMsg, destroyCmsgWrapper, (void*)pThrd);
transAsyncPoolDestroy(pThrd->asyncPool);
transDQDestroy(pThrd->delayQueue, destroyCmsgAndAhandle);
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index b324ca5f91..5a1ef31b7d 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -159,7 +159,7 @@ static void uvStartSendResp(SSvrMsg* msg);
static void uvNotifyLinkBrokenToApp(SSvrConn* conn);
-static FORCE_INLINE void destroySmsg(SSvrMsg* smsg);
+static FORCE_INLINE void destroySmsg(SSvrMsg* smsg);
static FORCE_INLINE SSvrConn* createConn(void* hThrd);
static FORCE_INLINE void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/);
static FORCE_INLINE void destroyConnRegArg(SSvrConn* conn);
@@ -671,7 +671,8 @@ static FORCE_INLINE void destroySmsg(SSvrMsg* smsg) {
transFreeMsg(smsg->msg.pCont);
taosMemoryFree(smsg);
}
-static void destroyAllConn(SWorkThrd* pThrd) {
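+// adapter matching the (msg, param) freeFunc signature; param is unused on the server side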
+static FORCE_INLINE void destroySmsgWrapper(void* smsg, void* param) { destroySmsg((SSvrMsg*)smsg); }
+static void destroyAllConn(SWorkThrd* pThrd) {
tTrace("thread %p destroy all conn ", pThrd);
while (!QUEUE_IS_EMPTY(&pThrd->conn)) {
queue* h = QUEUE_HEAD(&pThrd->conn);
@@ -1394,7 +1395,7 @@ void destroyWorkThrd(SWorkThrd* pThrd) {
}
taosThreadJoin(pThrd->thread, NULL);
SRV_RELEASE_UV(pThrd->loop);
- TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsg);
+ TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsgWrapper, NULL);
transAsyncPoolDestroy(pThrd->asyncPool);
uvWhiteListDestroy(pThrd->pWhiteList);
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index 62f2d10525..c32790022a 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -238,7 +238,8 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqSubscribeStb-r3.py -N 5
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -i True
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -n 3 -i True
-,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform.py -N 2 -n 1
+,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-db-removewal.py -N 2 -n 1
+,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb-removewal.py -N 6 -n 3
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 2 -n 1
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 6 -n 3
#,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-db.py -N 6 -n 3
@@ -1085,6 +1086,7 @@
,,y,script,./test.sh -f tsim/parser/join_multivnode.sim
,,y,script,./test.sh -f tsim/parser/join.sim
,,y,script,./test.sh -f tsim/parser/last_cache.sim
+,,y,script,./test.sh -f tsim/parser/last_both.sim
,,y,script,./test.sh -f tsim/parser/last_groupby.sim
,,y,script,./test.sh -f tsim/parser/lastrow.sim
,,y,script,./test.sh -f tsim/parser/lastrow2.sim
diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh
index 3b3d275a07..9af2525c74 100755
--- a/tests/script/sh/deploy.sh
+++ b/tests/script/sh/deploy.sh
@@ -117,7 +117,7 @@ echo "supportVnodes 1024" >> $TAOS_CFG
echo "statusInterval 1" >> $TAOS_CFG
echo "dataDir $DATA_DIR" >> $TAOS_CFG
echo "logDir $LOG_DIR" >> $TAOS_CFG
-echo "debugFlag 0" >> $TAOS_CFG
+echo "debugFlag 135" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "uDebugFlag 143" >> $TAOS_CFG
echo "rpcDebugFlag 143" >> $TAOS_CFG
diff --git a/tests/script/tsim/parser/last_both.sim b/tests/script/tsim/parser/last_both.sim
new file mode 100644
index 0000000000..e01a966744
--- /dev/null
+++ b/tests/script/tsim/parser/last_both.sim
@@ -0,0 +1,150 @@
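+# Verify last()/last_row() results: run the shared query file with cachemodel 'none',
+# again after a dnode restart, and once more after switching the database to cachemodel 'both'.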
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ======================== dnode1 start
+$db = testdb
+sql drop database if exists $db
+sql create database $db cachemodel 'none' minrows 10 stt_trigger 1
+sql use $db
+
+sql create stable st2 (ts timestamp, f1 int, f2 double, f3 binary(10), f4 timestamp) tags (id int)
+sql create table tb1 using st2 tags (1);
+sql create table tb2 using st2 tags (2);
+sql create table tb3 using st2 tags (3);
+sql create table tb4 using st2 tags (4);
+sql create table tb5 using st2 tags (1);
+sql create table tb6 using st2 tags (2);
+sql create table tb7 using st2 tags (3);
+sql create table tb8 using st2 tags (4);
+sql create table tb9 using st2 tags (5);
+sql create table tba using st2 tags (5);
+sql create table tbb using st2 tags (5);
+sql create table tbc using st2 tags (5);
+sql create table tbd using st2 tags (5);
+sql create table tbe using st2 tags (5);
+sql create table tbf using st2 tags (5);
+
+sql insert into tb9 values ("2021-05-09 10:12:26.000",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.001",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.002",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.003",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.004",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.005",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.006",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.007",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.008",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.009",28, 29, '30', -1005)
+sql delete from tb9 where ts = "2021-05-09 10:12:26.000"
+sql flush database $db
+
+sql insert into tb1 values ("2021-05-09 10:10:10", 1, 2.0, '3', -1000)
+sql insert into tb1 values ("2021-05-10 10:10:11", 4, 5.0, NULL, -2000)
+sql insert into tb1 values ("2021-05-12 10:10:12", 6,NULL, NULL, -3000)
+
+sql insert into tb2 values ("2021-05-09 10:11:13",-1,-2.0,'-3', -1001)
+sql insert into tb2 values ("2021-05-10 10:11:14",-4,-5.0, NULL, -2001)
+sql insert into tb2 values ("2021-05-11 10:11:15",-6, -7, '-8', -3001)
+
+sql insert into tb3 values ("2021-05-09 10:12:17", 7, 8.0, '9' , -1002)
+sql insert into tb3 values ("2021-05-09 10:12:17",10,11.0, NULL, -2002)
+sql insert into tb3 values ("2021-05-09 10:12:18",12,NULL, NULL, -3002)
+
+sql insert into tb4 values ("2021-05-09 10:12:19",13,14.0,'15' , -1003)
+sql insert into tb4 values ("2021-05-10 10:12:20",16,17.0, NULL, -2003)
+sql insert into tb4 values ("2021-05-11 10:12:21",18,NULL, NULL, -3003)
+
+sql insert into tb5 values ("2021-05-09 10:12:22",19, 20, '21', -1004)
+sql insert into tb6 values ("2021-05-11 10:12:23",22, 23, NULL, -2004)
+sql insert into tb7 values ("2021-05-10 10:12:24",24,NULL, '25', -3004)
+sql insert into tb8 values ("2021-05-11 10:12:25",26,NULL, '27', -4004)
+
+sql insert into tba values ("2021-05-10 10:12:27",31, 32, NULL, -2005)
+sql insert into tbb values ("2021-05-10 10:12:28",33,NULL, '35', -3005)
+sql insert into tbc values ("2021-05-11 10:12:29",36, 37, NULL, -4005)
+sql insert into tbd values ("2021-05-11 10:12:29",NULL,NULL,NULL,NULL )
+
+sql drop table tbf;
+sql alter table st2 add column c1 int;
+sql alter table st2 drop column c1;
+
+run tsim/parser/last_both_query.sim
+
+sql flush database $db
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode1 -s start
+
+run tsim/parser/last_both_query.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode1 -s start
+
+sql drop database if exists $db
+sql create database $db minrows 10 stt_trigger 1
+sql use $db
+
+sql create stable st2 (ts timestamp, f1 int, f2 double, f3 binary(10), f4 timestamp) tags (id int)
+sql create table tb1 using st2 tags (1);
+sql create table tb2 using st2 tags (2);
+sql create table tb3 using st2 tags (3);
+sql create table tb4 using st2 tags (4);
+sql create table tb5 using st2 tags (1);
+sql create table tb6 using st2 tags (2);
+sql create table tb7 using st2 tags (3);
+sql create table tb8 using st2 tags (4);
+sql create table tb9 using st2 tags (5);
+sql create table tba using st2 tags (5);
+sql create table tbb using st2 tags (5);
+sql create table tbc using st2 tags (5);
+sql create table tbd using st2 tags (5);
+sql create table tbe using st2 tags (5);
+sql create table tbf using st2 tags (5);
+
+sql insert into tb9 values ("2021-05-09 10:12:26.000",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.001",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.002",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.003",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.004",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.005",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.006",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.007",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.008",28, 29, '30', -1005)
+sql insert into tb9 values ("2021-05-09 10:12:26.009",28, 29, '30', -1005)
+sql delete from tb9 where ts = "2021-05-09 10:12:26.000"
+sql flush database $db
+
+sql insert into tb1 values ("2021-05-09 10:10:10", 1, 2.0, '3', -1000)
+sql insert into tb1 values ("2021-05-10 10:10:11", 4, 5.0, NULL, -2000)
+sql insert into tb1 values ("2021-05-12 10:10:12", 6,NULL, NULL, -3000)
+
+sql insert into tb2 values ("2021-05-09 10:11:13",-1,-2.0,'-3', -1001)
+sql insert into tb2 values ("2021-05-10 10:11:14",-4,-5.0, NULL, -2001)
+sql insert into tb2 values ("2021-05-11 10:11:15",-6, -7, '-8', -3001)
+
+sql insert into tb3 values ("2021-05-09 10:12:17", 7, 8.0, '9' , -1002)
+sql insert into tb3 values ("2021-05-09 10:12:17",10,11.0, NULL, -2002)
+sql insert into tb3 values ("2021-05-09 10:12:18",12,NULL, NULL, -3002)
+
+sql insert into tb4 values ("2021-05-09 10:12:19",13,14.0,'15' , -1003)
+sql insert into tb4 values ("2021-05-10 10:12:20",16,17.0, NULL, -2003)
+sql insert into tb4 values ("2021-05-11 10:12:21",18,NULL, NULL, -3003)
+
+sql insert into tb5 values ("2021-05-09 10:12:22",19, 20, '21', -1004)
+sql insert into tb6 values ("2021-05-11 10:12:23",22, 23, NULL, -2004)
+sql insert into tb7 values ("2021-05-10 10:12:24",24,NULL, '25', -3004)
+sql insert into tb8 values ("2021-05-11 10:12:25",26,NULL, '27', -4004)
+
+sql insert into tba values ("2021-05-10 10:12:27",31, 32, NULL, -2005)
+sql insert into tbb values ("2021-05-10 10:12:28",33,NULL, '35', -3005)
+sql insert into tbc values ("2021-05-11 10:12:29",36, 37, NULL, -4005)
+sql insert into tbd values ("2021-05-11 10:12:29",NULL,NULL,NULL,NULL )
+
+sql drop table tbf
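+# switch the running database to cachemodel 'both' before re-running the query checks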
+sql alter database $db cachemodel 'both'
+sql alter database $db cachesize 2
+sleep 11000
+
+run tsim/parser/last_both_query.sim
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/last_both_query.sim b/tests/script/tsim/parser/last_both_query.sim
new file mode 100644
index 0000000000..5f86412199
--- /dev/null
+++ b/tests/script/tsim/parser/last_both_query.sim
@@ -0,0 +1,496 @@
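+# Shared last()/last_row() query checks on database testdb; invoked repeatedly by
+# last_both.sim under different cachemodel settings.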
+
+sql connect
+
+$db = testdb
+sql use $db
+print "test tb1"
+
+sql select last(ts) from tb1
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-12 10:10:12.000@ then
+ print $data00
+ return -1
+endi
+
+sql select last(f1) from tb1
+if $rows != 1 then
+ return -1
+endi
+if $data00 != 6 then
+ print $data00
+ return -1
+endi
+
+sql select last(*) from tb1
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-12 10:10:12.000@ then
+ print $data00
+ return -1
+endi
+if $data01 != 6 then
+ return -1
+endi
+if $data02 != 5.000000000 then
+ print $data02
+ return -1
+endi
+if $data03 != 3 then
+ print expect 3, actual: $data03
+ return -1
+endi
+if $data04 != @70-01-01 07:59:57.000@ then
+ return -1
+endi
+
+sql select last(tb1.*,ts,f4) from tb1
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-12 10:10:12.000@ then
+ print $data00
+ return -1
+endi
+if $data01 != 6 then
+ return -1
+endi
+if $data02 != 5.000000000 then
+ print $data02
+ return -1
+endi
+if $data03 != 3 then
+ return -1
+endi
+if $data04 != @70-01-01 07:59:57.000@ then
+ return -1
+endi
+if $data05 != @21-05-12 10:10:12.000@ then
+  print $data05
+ return -1
+endi
+if $data06 != @70-01-01 07:59:57.000@ then
+ return -1
+endi
+
+print "test tb2"
+sql select last(ts) from tb2
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-11 10:11:15.000@ then
+ print $data00
+ return -1
+endi
+
+sql select last(f1) from tb2
+if $rows != 1 then
+ return -1
+endi
+if $data00 != -6 then
+ print $data00
+ return -1
+endi
+
+sql select last(*) from tb2
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-11 10:11:15.000@ then
+ print $data00
+ return -1
+endi
+if $data01 != -6 then
+ return -1
+endi
+if $data02 != -7.000000000 then
+ print $data02
+ return -1
+endi
+if $data03 != -8 then
+ return -1
+endi
+if $data04 != @70-01-01 07:59:56.999@ then
+ if $data04 != @70-01-01 07:59:57.-01@ then
+ return -1
+ endi
+endi
+
+sql select last(tb2.*,ts,f4) from tb2
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-11 10:11:15.000@ then
+ print $data00
+ return -1
+endi
+if $data01 != -6 then
+ return -1
+endi
+if $data02 != -7.000000000 then
+ print $data02
+ return -1
+endi
+if $data03 != -8 then
+ return -1
+endi
+if $data04 != @70-01-01 07:59:56.999@ then
+ if $data04 != @70-01-01 07:59:57.-01@ then
+ return -1
+ endi
+endi
+if $data05 != @21-05-11 10:11:15.000@ then
+  print $data05
+ return -1
+endi
+if $data06 != @70-01-01 07:59:56.999@ then
+  if $data06 != @70-01-01 07:59:57.-01@ then
+ return -1
+ endi
+endi
+
+print "test tbd"
+sql select last(*) from tbd
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-11 10:12:29.000@ then
+ print $data00
+ return -1
+endi
+if $data01 != NULL then
+ return -1
+endi
+if $data02 != NULL then
+ print $data02
+ return -1
+endi
+if $data03 != NULL then
+ return -1
+endi
+if $data04 != NULL then
+ return -1
+endi
+
+print "test tbe"
+sql select last(*) from tbe
+if $rows != 0 then
+ return -1
+endi
+
+print "test stable"
+sql select last(ts) from st2
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-12 10:10:12.000@ then
+ print $data00
+ return -1
+endi
+
+sql select last(f1) from st2
+if $rows != 1 then
+ return -1
+endi
+if $data00 != 6 then
+ print $data00
+ return -1
+endi
+
+sql select last(*) from st2
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-12 10:10:12.000@ then
+ print $data00
+ return -1
+endi
+if $data01 != 6 then
+ return -1
+endi
+if $data02 != 37.000000000 then
+ print expect 37.000000000 actual: $data02
+ return -1
+endi
+if $data03 != 27 then
+ return -1
+endi
+if $data04 != @70-01-01 07:59:57.000@ then
+ return -1
+endi
+
+
+sql select last(st2.*,ts,f4) from st2
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-12 10:10:12.000@ then
+ print $data00
+ return -1
+endi
+if $data01 != 6 then
+ return -1
+endi
+if $data02 != 37.000000000 then
+  print expect 37.000000000, actual: $data02
+ return -1
+endi
+if $data03 != 27 then
+ return -1
+endi
+if $data04 != @70-01-01 07:59:57.000@ then
+ return -1
+endi
+if $data05 != @21-05-12 10:10:12.000@ then
+  print $data05
+ return -1
+endi
+if $data06 != @70-01-01 07:59:57.000@ then
+ return -1
+endi
+
+sql select last(*), id from st2 group by id order by id
+print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
+print ===> $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19
+print ===> $data20 $data21 $data22 $data23 $data24 $data25 $data26 $data27 $data28 $data29
+print ===> $data30 $data31 $data32 $data33 $data34 $data35 $data36 $data37 $data38 $data39
+print ===> $data40 $data41 $data42 $data43 $data44 $data45 $data46 $data47 $data48 $data49
+
+if $rows != 5 then
+ return -1
+endi
+if $data00 != @21-05-12 10:10:12.000@ then
+ return -1
+endi
+if $data01 != 6 then
+ return -1
+endi
+if $data02 != 5.000000000 then
+ print $data02
+ return -1
+endi
+if $data03 != 21 then
+ return -1
+endi
+if $data04 != @70-01-01 07:59:57.000@ then
+ return -1
+endi
+if $data05 != 1 then
+ return -1
+endi
+if $data10 != @21-05-11 10:12:23.000@ then
+ return -1
+endi
+if $data11 != 22 then
+ return -1
+endi
+if $data12 != 23.000000000 then
+  print $data12
+ return -1
+endi
+if $data13 != -8 then
+ return -1
+endi
+if $data14 != @70-01-01 07:59:58.-04@ then
+ return -1
+endi
+if $data15 != 2 then
+ return -1
+endi
+if $data20 != @21-05-10 10:12:24.000@ then
+ return -1
+endi
+if $data21 != 24 then
+ return -1
+endi
+if $data22 != 11.000000000 then
+ print expect 11.000000000 actual: $data22
+ return -1
+endi
+if $data23 != 25 then
+ return -1
+endi
+if $data24 != @70-01-01 07:59:57.-04@ then
+ return -1
+endi
+if $data25 != 3 then
+ return -1
+endi
+if $data30 != @21-05-11 10:12:25.000@ then
+ return -1
+endi
+if $data31 != 26 then
+ return -1
+endi
+if $data32 != 17.000000000 then
+  print $data32
+ return -1
+endi
+if $data33 != 27 then
+ return -1
+endi
+if $data34 != @70-01-01 07:59:56.-04@ then
+ return -1
+endi
+if $data35 != 4 then
+ return -1
+endi
+if $data40 != @21-05-11 10:12:29.000@ then
+ return -1
+endi
+if $data41 != 36 then
+ return -1
+endi
+if $data42 != 37.000000000 then
+  print $data42
+ return -1
+endi
+if $data43 != 35 then
+ return -1
+endi
+if $data44 != @70-01-01 07:59:56.-05@ then
+ return -1
+endi
+if $data45 != 5 then
+ return -1
+endi
+
+sql select last_row(*), id from st2 group by id order by id
+print ===> $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
+print ===> $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19
+print ===> $data20 $data21 $data22 $data23 $data24 $data25 $data26 $data27 $data28 $data29
+print ===> $data30 $data31 $data32 $data33 $data34 $data35 $data36 $data37 $data38 $data39
+print ===> $data40 $data41 $data42 $data43 $data44 $data45 $data46 $data47 $data48 $data49
+
+if $rows != 5 then
+ return -1
+endi
+if $data00 != @21-05-12 10:10:12.000@ then
+ return -1
+endi
+if $data01 != 6 then
+ return -1
+endi
+if $data02 != NULL then
+ print $data02
+ return -1
+endi
+if $data03 != NULL then
+ return -1
+endi
+if $data04 != @70-01-01 07:59:57.000@ then
+ return -1
+endi
+if $data05 != 1 then
+ return -1
+endi
+if $data10 != @21-05-11 10:12:23.000@ then
+ return -1
+endi
+if $data11 != 22 then
+ return -1
+endi
+if $data12 != 23.000000000 then
+  print $data12
+ return -1
+endi
+if $data13 != NULL then
+ return -1
+endi
+if $data14 != @70-01-01 07:59:58.-04@ then
+ return -1
+endi
+if $data15 != 2 then
+ return -1
+endi
+if $data20 != @21-05-10 10:12:24.000@ then
+ return -1
+endi
+if $data21 != 24 then
+ return -1
+endi
+if $data22 != NULL then
+ print expect NULL actual: $data22
+ return -1
+endi
+if $data23 != 25 then
+ return -1
+endi
+if $data24 != @70-01-01 07:59:57.-04@ then
+ return -1
+endi
+if $data25 != 3 then
+ return -1
+endi
+if $data30 != @21-05-11 10:12:25.000@ then
+ return -1
+endi
+if $data31 != 26 then
+ return -1
+endi
+if $data32 != NULL then
+  print $data32
+ return -1
+endi
+if $data33 != 27 then
+ return -1
+endi
+if $data34 != @70-01-01 07:59:56.-04@ then
+ return -1
+endi
+if $data35 != 4 then
+ return -1
+endi
+if $data40 != @21-05-11 10:12:29.000@ then
+ return -1
+endi
+#if $data41 != NULL then
+# return -1
+#endi
+#if $data42 != NULL then
+# print $data02
+# return -1
+#endi
+if $data43 != NULL then
+ return -1
+endi
+#if $data44 != NULL then
+# return -1
+#endi
+if $data45 != 5 then
+ return -1
+endi
+
+print "test tbn"
+sql create table if not exists tbn (ts timestamp, f1 int, f2 double, f3 binary(10), f4 timestamp)
+sql insert into tbn values ("2021-05-09 10:10:10", 1, 2.0, '3', -1000)
+sql insert into tbn values ("2021-05-10 10:10:11", 4, 5.0, NULL, -2000)
+sql insert into tbn values ("2021-05-12 10:10:12", 6,NULL, NULL, -3000)
+sql insert into tbn values ("2021-05-13 10:10:12", NULL,NULL, NULL,NULL)
+
+sql select last(*) from tbn;
+if $rows != 1 then
+ return -1
+endi
+if $data00 != @21-05-13 10:10:12.000@ then
+ print $data00
+ return -1
+endi
+if $data01 != 6 then
+ return -1
+endi
+if $data02 != 5.000000000 then
+ print $data02
+ return -1
+endi
+if $data03 != 3 then
+ return -1
+endi
+if $data04 != @70-01-01 07:59:57.000@ then
+ return -1
+endi
+
+sql alter table tbn add column c1 int;
+sql alter table tbn drop column c1;
diff --git a/tests/script/tsim/parser/last_cache_query.sim b/tests/script/tsim/parser/last_cache_query.sim
index 6cd5309590..30196e0b62 100644
--- a/tests/script/tsim/parser/last_cache_query.sim
+++ b/tests/script/tsim/parser/last_cache_query.sim
@@ -386,3 +386,5 @@ if $data04 != @70-01-01 07:59:57.000@ then
return -1
endi
+sql alter table tbn add column c1 int;
+sql alter table tbn drop column c1;
diff --git a/tests/script/tsim/query/query_count1.sim b/tests/script/tsim/query/query_count1.sim
index 0694ab062a..043b604263 100644
--- a/tests/script/tsim/query/query_count1.sim
+++ b/tests/script/tsim/query/query_count1.sim
@@ -79,5 +79,27 @@ if $data22 != 4 then
goto loop3
endi
+
+print step2
+print =============== create database
+sql create database test1 vgroups 1;
+sql use test1;
+
+sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+
+# count_val must be within [2, INT32_MAX]; sliding_val must be within [1, count_val]
+sql_error select _wstart as s, count(*) c1, sum(b), max(c) from t1 count_window(-1);
+sql_error select _wstart as s, count(*) c1, sum(b), max(c) from t1 count_window(0);
+sql_error select _wstart as s, count(*) c1, sum(b), max(c) from t1 count_window(1);
+sql_error select _wstart as s, count(*) c1, sum(b), max(c) from t1 count_window(2147483648);
+sql_error select _wstart as s, count(*) c1, sum(b), max(c) from t1 count_window(10, 0);
+sql_error select _wstart as s, count(*) c1, sum(b), max(c) from t1 count_window(10, -1);
+sql_error select _wstart as s, count(*) c1, sum(b), max(c) from t1 count_window(10, 11);
+
+sql select _wstart as s, count(*) c1, sum(b), max(c) from t1 count_window(2);
+sql select _wstart as s, count(*) c1, sum(b), max(c) from t1 count_window(2147483647);
+
print query_count0 end
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/system-test/0-others/com_alltypedata.json b/tests/system-test/0-others/com_alltypedata.json
index 0e6d8e3a07..1499ca7670 100644
--- a/tests/system-test/0-others/com_alltypedata.json
+++ b/tests/system-test/0-others/com_alltypedata.json
@@ -22,7 +22,7 @@
"vgroups": 2,
"replica": 1,
"precision": "ms",
- "stt_trigger": 8,
+ "stt_trigger": 1,
"minRows": 100,
"maxRows": 4096
},
diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py
index c936cf1ae4..8163177a3b 100644
--- a/tests/system-test/0-others/compatibility.py
+++ b/tests/system-test/0-others/compatibility.py
@@ -1,11 +1,13 @@
from urllib.parse import uses_relative
import taos
+import taosws
import sys
import os
import time
import platform
import inspect
from taos.tmq import Consumer
+from taos.tmq import *
from pathlib import Path
from util.log import *
@@ -17,7 +19,7 @@ from util.dnodes import TDDnode
from util.cluster import *
import subprocess
-BASEVERSION = "3.0.2.3"
+BASEVERSION = "3.2.0.0"
class TDTestCase:
def caseDescription(self):
f'''
@@ -30,7 +32,7 @@ class TDTestCase:
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300 stt_trigger 4; ;use deldata;
+ self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300 stt_trigger 1; ;use deldata;
create table deldata.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int);
create table deldata.ct1 using deldata.stb1 tags ( 1 );
insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );
@@ -104,8 +106,19 @@ class TDTestCase:
print(f"{packageName} has been exists")
os.system(f" cd {packagePath} && tar xvf {packageName} && cd {packageTPath} && ./install.sh -e no " )
tdDnodes.stop(1)
- print(f"start taosd: rm -rf {dataPath}/* && nohup taosd -c {cPath} & ")
- os.system(f"rm -rf {dataPath}/* && nohup taosd -c {cPath} & " )
+ print(f"start taosd: rm -rf {dataPath}/* && nohup /usr/bin/taosd -c {cPath} & ")
+ os.system(f"rm -rf {dataPath}/* && nohup /usr/bin/taosd -c {cPath} & " )
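+        # start the base-version taosadapter as well; the taosws websocket consumer below connects through it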
+ os.system(f"killall taosadapter" )
+ os.system(f"cp /etc/taos/taosadapter.toml {cPath}/taosadapter.toml " )
+ taosadapter_cfg = cPath + "/taosadapter.toml"
+ taosadapter_log_path = cPath + "/../log/"
+ print(f"taosadapter_cfg:{taosadapter_cfg},taosadapter_log_path:{taosadapter_log_path} ")
+ self.alter_string_in_file(taosadapter_cfg,"#path = \"/var/log/taos\"",f"path = \"{taosadapter_log_path}\"")
+ self.alter_string_in_file(taosadapter_cfg,"taosConfigDir = \"\"",f"taosConfigDir = \"{cPath}\"")
+ print("/usr/bin/taosadapter --version")
+ os.system(f" /usr/bin/taosadapter --version" )
+        print(f" LD_LIBRARY_PATH=/usr/lib /usr/bin/taosadapter -c {taosadapter_cfg} 2>&1 & ")
+ os.system(f" LD_LIBRARY_PATH=/usr/lib /usr/bin/taosadapter -c {taosadapter_cfg} 2>&1 & " )
sleep(5)
@@ -116,7 +129,24 @@ class TDTestCase:
def is_list_same_as_ordered_list(self,unordered_list, ordered_list):
sorted_list = sorted(unordered_list)
return sorted_list == ordered_list
-
+
+ def alter_string_in_file(self,file,old_str,new_str):
+ """
+ replace str in file
+ :param file
+ :param old_str
+ :param new_str
+ :return:
+ """
+ file_data = ""
+ with open(file, "r", encoding="utf-8") as f:
+ for line in f:
+ if old_str in line:
+ line = line.replace(old_str,new_str)
+ file_data += line
+ with open(file,"w",encoding="utf-8") as f:
+ f.write(file_data)
+
def run(self):
scriptsPath = os.path.dirname(os.path.realpath(__file__))
distro_id = distro.id()
@@ -131,7 +161,7 @@ class TDTestCase:
dbname = "test"
stb = f"{dbname}.meters"
self.installTaosd(bPath,cPath)
- os.system("echo 'debugFlag 143' > /etc/taos/taos.cfg ")
+ # os.system(f"echo 'debugFlag 143' >> {cPath}/taos.cfg ")
tableNumbers=100
recordNumbers1=100
recordNumbers2=1000
@@ -163,11 +193,46 @@ class TDTestCase:
# os.system(f"LD_LIBRARY_PATH=/usr/lib taos -s 'use test;create stream current_stream into current_stream_output_stb as select _wstart as `start`, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);' ")
# os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;create stream power_stream into power_stream_output_stb as select ts, concat_ws(\\".\\", location, tbname) as meter_location, current*voltage*cos(phase) as active_power, current*voltage*sin(phase) as reactive_power from meters partition by tbname;" ')
# os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;show streams;" ')
- os.system(f"sed -i 's/\/etc\/taos/{cPath}/' 0-others/tmqBasic.json ")
+ self.alter_string_in_file("0-others/tmqBasic.json", "/etc/taos/", cPath)
# os.system("LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/tmqBasic.json -y ")
os.system('LD_LIBRARY_PATH=/usr/lib taos -s "create topic if not exists tmq_test_topic as select current,voltage,phase from test.meters where voltage <= 106 and current <= 5;" ')
os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;show topics;" ')
+ os.system(f" /usr/bin/taosadapter --version " )
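+        # consume the pre-created topic over websocket (taosws) to verify tmq through taosadapter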
+ consumer_dict = {
+ "group.id": "g1",
+ "td.connect.user": "root",
+ "td.connect.pass": "taosdata",
+ "auto.offset.reset": "earliest",
+ }
+ consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+ try:
+ consumer.subscribe(["tmq_test_topic"])
+ except TmqError:
+ tdLog.exit(f"subscribe error")
+
+ while True:
+ message = consumer.poll(timeout=1.0)
+ if message:
+ print("message")
+ id = message.vgroup()
+ topic = message.topic()
+ database = message.database()
+
+ for block in message:
+ nrows = block.nrows()
+ ncols = block.ncols()
+ for row in block:
+ print(row)
+ values = block.fetchall()
+ print(nrows, ncols)
+
+ consumer.commit(message)
+ else:
+ print("break")
+ break
+
+ consumer.close()
tdLog.info(" LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/compa4096.json -y ")
os.system("LD_LIBRARY_PATH=/usr/lib taosBenchmark -f 0-others/compa4096.json -y")
os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'flush database db4096 '")
@@ -184,7 +249,8 @@ class TDTestCase:
os.system("pkill taosd") # make sure all the data are saved in disk.
self.checkProcessPid("taosd")
-
+        os.system("pkill taosadapter") # stop taosadapter before switching to the new version
+ self.checkProcessPid("taosadapter")
tdLog.printNoPrefix("==========step2:update new version ")
self.buildTaosd(bPath)
@@ -193,6 +259,7 @@ class TDTestCase:
tdsql=tdCom.newTdSql()
print(tdsql)
cmd = f" LD_LIBRARY_PATH=/usr/lib taos -h localhost ;"
+ print(os.system(cmd))
if os.system(cmd) == 0:
raise Exception("failed to execute system command. cmd: %s" % cmd)
diff --git a/tests/system-test/7-tmq/tmqVnodeTransform.py b/tests/system-test/7-tmq/tmqVnodeTransform-db-removewal.py
similarity index 78%
rename from tests/system-test/7-tmq/tmqVnodeTransform.py
rename to tests/system-test/7-tmq/tmqVnodeTransform-db-removewal.py
index c2b002ead6..a853489c3f 100644
--- a/tests/system-test/7-tmq/tmqVnodeTransform.py
+++ b/tests/system-test/7-tmq/tmqVnodeTransform-db-removewal.py
@@ -122,135 +122,7 @@ class TDTestCase:
tdLog.debug(f"redistributeSql:{redistributeSql}")
tdSql.query(redistributeSql)
tdLog.debug("redistributeSql ok")
-
- def tmqCase1(self):
- tdLog.printNoPrefix("======== test case 1: ")
- paraDict = {'dbName': 'dbt',
- 'dropFlag': 1,
- 'event': '',
- 'vgroups': 1,
- 'stbName': 'stb',
- 'colPrefix': 'c',
- 'tagPrefix': 't',
- 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
- 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
- 'ctbPrefix': 'ctb',
- 'ctbStartIdx': 0,
- 'ctbNum': 10,
- 'rowsPerTbl': 1000,
- 'batchNum': 10,
- 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
- 'pollDelay': 60,
- 'showMsg': 1,
- 'showRow': 1,
- 'snapshot': 0}
-
- paraDict['vgroups'] = self.vgroups
- paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- topicNameList = ['topic1']
- # expectRowsList = []
- tmqCom.initConsumerTable()
-
- tdLog.info("create topics from stb with filter")
- queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
- # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
- sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
- tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
- # tdSql.query(queryString)
- # expectRowsList.append(tdSql.getRows())
-
- # init consume info, and start tmq_sim, then check consume result
- tdLog.info("insert consume info to consume processor")
- consumerId = 0
- expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
- topicList = topicNameList[0]
- ifcheckdata = 1
- ifManualCommit = 1
- keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest'
- tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
- tdLog.info("start consume processor")
- tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
- tdLog.info("create ctb1")
- tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
- ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
- tdLog.info("insert data")
- pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
-
- tmqCom.getStartConsumeNotifyFromTmqsim()
- tmqCom.getStartCommitNotifyFromTmqsim()
-
- #restart dnode & remove wal
- self.restartAndRemoveWal()
-
- # redistribute vgroup
- self.redistributeVgroups();
-
- tdLog.info("create ctb2")
- paraDict['ctbPrefix'] = "ctbn"
- tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
- ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
- tdLog.info("insert data")
- pInsertThread1 = tmqCom.asyncInsertDataByInterlace(paraDict)
- pInsertThread.join()
- pInsertThread1.join()
-
- expectRows = 1
- resultList = tmqCom.selectConsumeResult(expectRows)
-
- if expectrowcnt / 2 > resultList[0]:
- tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectrowcnt / 2, resultList[0]))
- tdLog.exit("%d tmq consume rows error!"%consumerId)
-
- # tmqCom.checkFileContent(consumerId, queryString)
-
- time.sleep(10)
- for i in range(len(topicNameList)):
- tdSql.query("drop topic %s"%topicNameList[i])
-
- tdLog.printNoPrefix("======== test case 1 end ...... ")
-
- def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
- paraDict = {'dbName':'dbt'}
-
- ntbName = "ntb"
-
- topicNameList = ['topic2']
- tmqCom.initConsumerTable()
-
- sqlString = "create table %s.%s(ts timestamp, i nchar(8))" %(paraDict['dbName'], ntbName)
- tdLog.info("create nomal table sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
- tdLog.info("create topics from nomal table")
- queryString = "select * from %s.%s"%(paraDict['dbName'], ntbName)
- sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
- tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
- tdSql.query("flush database %s"%(paraDict['dbName']))
- #restart dnode & remove wal
- self.restartAndRemoveWal()
-
- # redistribute vgroup
- self.redistributeVgroups();
-
- sqlString = "alter table %s.%s modify column i nchar(16)" %(paraDict['dbName'], ntbName)
- tdLog.info("alter table sql: %s"%sqlString)
- tdSql.error(sqlString)
- expectRows = 0
- resultList = tmqCom.selectConsumeResult(expectRows)
- time.sleep(1)
- for i in range(len(topicNameList)):
- tdSql.query("drop topic %s"%topicNameList[i])
-
- tdLog.printNoPrefix("======== test case 2 end ...... ")
-
+
def tmqCase3(self):
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
@@ -330,12 +202,90 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 3 end ...... ")
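+    # case 4: subscribe to a whole database, then remove the wal files, restart the dnode and
+    # redistribute the vgroup while consumption is in progress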
+ def tmqCaseDbname(self):
+        tdLog.printNoPrefix("======== test case 4 subscribe Dbname start: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 1,
+ 'stbName': 'stbn',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 10,
+ 'rowsPerTbl': 1000,
+ 'batchNum': 10,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 10,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ topicNameList = ['topic4']
+ tmqCom.initConsumerTable()
+
+ tdLog.info("create stb")
+ tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+
+ tdLog.info("create ctb")
+ tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+ ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+ tdLog.info("insert data")
+ tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+ ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
+ tdLog.info("create topics from database ")
+ queryString = "database %s "%(paraDict['dbName'])
+ sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
+ tdLog.info("create topic sql: %s"%sqlString)
+ tdSql.execute(sqlString)
+
+ # init consume info, and start tmq_sim, then check consume result
+ tdLog.info("insert consume info to consume processor")
+ consumerId = 0
+ expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
+ topicList = topicNameList[0]
+ ifcheckdata = 1
+ ifManualCommit = 1
+ keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest'
+ tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+ tdLog.info("wait the consume result")
+
+ time.sleep(1)
+ # restart dnode & remove wal
+ self.restartAndRemoveWal()
+
+ # redistribute vgroup
+ self.redistributeVgroups()
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+ tdLog.info("wait the consume result")
+ expectRows = 2
+ resultList = tmqCom.selectConsumeResult(expectRows)
+
+ time.sleep(6)
+ for i in range(len(topicNameList)):
+ tdSql.query("drop topic %s"%topicNameList[i])
+
+        tdLog.printNoPrefix("======== test case 4 subscribe Dbname end ...... ")
+
def run(self):
- self.prepareTestEnv()
- self.tmqCase1()
- self.tmqCase2()
self.prepareTestEnv()
self.tmqCase3()
+ self.prepareTestEnv()
+ self.tmqCaseDbname()
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py b/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py
new file mode 100644
index 0000000000..40879d5c66
--- /dev/null
+++ b/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py
@@ -0,0 +1,266 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+import math
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.common import *
+from util.cluster import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
+
+class TDTestCase:
+ def __init__(self):
+ self.vgroups = 1
+ self.ctbNum = 10
+ self.rowsPerTbl = 1000
+
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor(), True)
+
+ def getDataPath(self):
+ selfPath = tdCom.getBuildPath()
+
+ return selfPath + '/../sim/dnode%d/data/vnode/vnode%d/wal/*';
+
+ def prepareTestEnv(self):
+ tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 1,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 10,
+ 'rowsPerTbl': 1000,
+ 'batchNum': 10,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 60,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ tdCom.drop_all_db()
+ tmqCom.initConsumerTable()
+ tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], wal_retention_period=36000,vgroups=paraDict["vgroups"],replica=self.replicaVar)
+ tdLog.info("create stb")
+ tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+ # tdLog.info("create ctb")
+ # tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+ # ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # tdLog.info("insert data")
+ # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+ # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
+ # tdDnodes.stop(1)
+ # tdDnodes.start(1)
+ # tdSql.query("flush database %s"%(paraDict['dbName']))
+ return
+
+ def restartAndRemoveWal(self):
+ tdDnodes = cluster.dnodes
+ tdSql.query("select * from information_schema.ins_vnodes")
+ for result in tdSql.queryResult:
+ if result[2] == 'dbt':
+ tdLog.debug("dnode is %d"%(result[0]))
+ dnodeId = result[0]
+ vnodeId = result[1]
+
+ tdDnodes[dnodeId - 1].stoptaosd()
+ time.sleep(1)
+ dataPath = self.getDataPath()
+ dataPath = dataPath%(dnodeId,vnodeId)
+ os.system('rm -rf ' + dataPath)
+ tdLog.debug("dataPath:%s"%dataPath)
+ tdDnodes[dnodeId - 1].starttaosd()
+ time.sleep(1)
+ break
+ tdLog.debug("restart dnode ok")
+
+ def redistributeVgroups(self):
+ dnodesList = []
+ tdSql.query("show dnodes")
+ for result in tdSql.queryResult:
+ dnodesList.append(result[0])
+ print("dnodeList:",dnodesList)
+ tdSql.query("select * from information_schema.ins_vnodes")
+ vnodeId = 0
+ for result in tdSql.queryResult:
+ if result[2] == 'dbt':
+ tdLog.debug("dnode is %d"%(result[0]))
+ dnodesList.remove(result[0])
+ vnodeId = result[1]
+ print("its all data",dnodesList)
+ # if self.replicaVar == 1:
+ # redistributeSql = "redistribute vgroup %d dnode %d" %(vnodeId, dnodesList[0])
+ # else:
+ redistributeSql = f"redistribute vgroup {vnodeId} "
+ for vgdnode in dnodesList:
+ redistributeSql += f"dnode {vgdnode} "
+ print(redistributeSql)
+
+ tdLog.debug(f"redistributeSql:{redistributeSql}")
+ tdSql.query(redistributeSql)
+ tdLog.debug("redistributeSql ok")
+
+ def tmqCase1(self):
+ tdLog.printNoPrefix("======== test case 1: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 1,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 10,
+ 'rowsPerTbl': 1000,
+ 'batchNum': 10,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 60,
+ 'showMsg': 1,
+ 'showRow': 1,
+ 'snapshot': 0}
+
+ paraDict['vgroups'] = self.vgroups
+ paraDict['ctbNum'] = self.ctbNum
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
+ topicNameList = ['topic1']
+ # expectRowsList = []
+ tmqCom.initConsumerTable()
+
+ tdLog.info("create topics from stb with filter")
+ queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
+ # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
+ sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
+ tdLog.info("create topic sql: %s"%sqlString)
+ tdSql.execute(sqlString)
+ # tdSql.query(queryString)
+ # expectRowsList.append(tdSql.getRows())
+
+ # init consume info, and start tmq_sim, then check consume result
+ tdLog.info("insert consume info to consume processor")
+ consumerId = 0
+ expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
+ topicList = topicNameList[0]
+ ifcheckdata = 1
+ ifManualCommit = 1
+ keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest'
+ tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
+ tdLog.info("wait the consume result")
+
+ tdLog.info("create ctb1")
+ tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+ ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+ tdLog.info("insert data")
+ pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
+
+ tmqCom.getStartConsumeNotifyFromTmqsim()
+ tmqCom.getStartCommitNotifyFromTmqsim()
+
+ #restart dnode & remove wal
+ self.restartAndRemoveWal()
+
+ # redistribute vgroup
+ self.redistributeVgroups();
+
+ tdLog.info("create ctb2")
+ paraDict['ctbPrefix'] = "ctbn"
+ tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
+ ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
+ tdLog.info("insert data")
+ pInsertThread1 = tmqCom.asyncInsertDataByInterlace(paraDict)
+ pInsertThread.join()
+ pInsertThread1.join()
+
+ expectRows = 1
+ resultList = tmqCom.selectConsumeResult(expectRows)
+
+ if expectrowcnt / 2 > resultList[0]:
+ tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectrowcnt / 2, resultList[0]))
+ tdLog.exit("%d tmq consume rows error!"%consumerId)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
+
+ time.sleep(10)
+ for i in range(len(topicNameList)):
+ tdSql.query("drop topic %s"%topicNameList[i])
+
+ tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+ def tmqCase2(self):
+ tdLog.printNoPrefix("======== test case 2: ")
+ paraDict = {'dbName':'dbt'}
+
+ ntbName = "ntb"
+
+ topicNameList = ['topic2']
+ tmqCom.initConsumerTable()
+
+ sqlString = "create table %s.%s(ts timestamp, i nchar(8))" %(paraDict['dbName'], ntbName)
+        tdLog.info("create normal table sql: %s"%sqlString)
+ tdSql.execute(sqlString)
+
+        tdLog.info("create topics from normal table")
+ queryString = "select * from %s.%s"%(paraDict['dbName'], ntbName)
+ sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
+ tdLog.info("create topic sql: %s"%sqlString)
+ tdSql.execute(sqlString)
+ tdSql.query("flush database %s"%(paraDict['dbName']))
+ #restart dnode & remove wal
+ self.restartAndRemoveWal()
+
+ # redistribute vgroup
+ self.redistributeVgroups();
+
+ sqlString = "alter table %s.%s modify column i nchar(16)" %(paraDict['dbName'], ntbName)
+ tdLog.info("alter table sql: %s"%sqlString)
+ tdSql.error(sqlString)
+ expectRows = 0
+ resultList = tmqCom.selectConsumeResult(expectRows)
+ time.sleep(1)
+ for i in range(len(topicNameList)):
+ tdSql.query("drop topic %s"%topicNameList[i])
+
+ tdLog.printNoPrefix("======== test case 2 end ...... ")
+
+ def run(self):
+ self.prepareTestEnv()
+ self.tmqCase1()
+ self.tmqCase2()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/utils/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c
index 2257089f06..5012e50bab 100644
--- a/utils/test/c/tmq_taosx_ci.c
+++ b/utils/test/c/tmq_taosx_ci.c
@@ -574,6 +574,7 @@ tmq_t* build_consumer() {
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
+ tmq_conf_set(conf, "msg.consume.excluded", "1");
if (g_conf.snapShot) {
tmq_conf_set(conf, "experimental.snapshot.enable", "true");