Merge branch '3.0' into fix/TS-4421-3.0
commit a083f45ac1
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
 SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-SET(TD_VER_NUMBER "3.2.3.0.alpha")
+SET(TD_VER_NUMBER "3.2.4.0.alpha")
 ENDIF ()
 
 IF (DEFINED VERCOMPATIBLE)
@@ -842,12 +842,12 @@ consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
 
 In addition to native connections, the client library also supports subscriptions via websockets.
 
-The syntax for creating a consumer is "consumer = consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../develop/tmq/#create-a-consumer).
+The syntax for creating a consumer is "consumer = Consumer(conf=configs)". You need to specify that the `td.connect.websocket.scheme` parameter is set to "ws" in the configuration. For more subscription api parameters, please refer to [Data Subscription](../../develop/tmq/#create-a-consumer).
 
 ```python
 import taosws
 
-consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
 ```
 
 </TabItem>
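For reference, a minimal end-to-end sketch of using the consumer created above, assuming `subscribe()` accepts a list of topic names as in the native client; the topic name `topic_meters` is purely illustrative:

```python
import taosws

# Create the WebSocket consumer as shown in the hunk above.
consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})

# Subscribe to an existing topic; the topic name here is illustrative.
consumer.subscribe(["topic_meters"])

# Poll with a 1-second timeout and print every row received.
while True:
    message = consumer.poll(1)
    if not message:
        continue
    for block in message:
        for row in block:
            print(row)
```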
@@ -887,13 +887,13 @@ The `poll` function is used to consume data in tmq. The parameter of the `poll`
 
 ```python
 while True:
-    res = consumer.poll(1)
-    if not res:
+    message = consumer.poll(1)
+    if not message:
         continue
-    err = res.error()
+    err = message.error()
     if err is not None:
         raise err
-    val = res.value()
+    val = message.value()
 
     for block in val:
         print(block.fetchall())
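When `enable.auto.commit` is disabled, the application is expected to commit offsets itself after the data has been processed. A minimal sketch, assuming the native consumer exposes a Kafka-style `commit()` that accepts the polled message:

```python
while True:
    res = consumer.poll(1)
    if not res:
        continue
    err = res.error()
    if err is not None:
        raise err
    for block in res.value():
        print(block.fetchall())
    # Commit only after the message has been processed successfully.
    # This assumes a Kafka-style commit(message) API and is only needed
    # when enable.auto.commit is set to false.
    consumer.commit(res)
```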
@@ -902,16 +902,14 @@ while True:
 </TabItem>
 <TabItem value="websocket" label="WebSocket connection">
 
-The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out. You have to handle error messages in response data.
+The `poll` function is used to consume data in tmq. The parameter of the `poll` function is a value of type float representing the timeout in seconds. It returns a `Message` before timing out, or `None` on timing out.
 
 ```python
 while True:
-    res = consumer.poll(timeout=1.0)
-    if not res:
+    message = consumer.poll(1)
+    if not message:
         continue
-    err = res.error()
-    if err is not None:
-        raise err
+
     for block in message:
         for row in block:
             print(row)
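Once consumption is finished, the subscription and the connection should be released. A minimal sketch, assuming the WebSocket consumer provides the same `unsubscribe()` and `close()` methods as the native one:

```python
try:
    while True:
        message = consumer.poll(1)
        if not message:
            continue
        for block in message:
            for row in block:
                print(row)
finally:
    # Drop the subscription first, then release the underlying connection.
    # Both calls are assumed to mirror the native client's API.
    consumer.unsubscribe()
    consumer.close()
```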
@@ -41,16 +41,28 @@ window_clause: {
 SESSION(ts_col, tol_val)
 | STATE_WINDOW(col)
 | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)]
+| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
+| COUNT_WINDOW(count_val[, sliding_val])
 }
 ```
 
 `SESSION` indicates a session window, and `tol_val` indicates the maximum range of the time interval. If the time interval between two continuous rows are within the time interval specified by `tol_val` they belong to the same session window; otherwise a new session window is started automatically.
 
+`EVENT_WINDOW` is determined according to the window start condition and the window close condition. The window is started when `start_trigger_condition` is evaluated to true, the window is closed when `end_trigger_condition` is evaluated to true. `start_trigger_condition` and `end_trigger_condition` can be any conditional expressions supported by TDengine and can include multiple columns.
+
+`COUNT_WINDOW` is a counting window that is divided by a fixed number of data rows.`count_val`: A constant, which is a positive integer and must be greater than or equal to 2. The maximum value is 2147483648. `count_val` represents the maximum number of data rows contained in each `COUNT_WINDOW`. When the total number of data rows cannot be divided by `count_val`, the number of rows in the last window will be less than `count_val`. `sliding_val`: is a constant that represents the number of window slides, similar to `SLIDING` in `INTERVAL`.
+
 For example, the following SQL statement creates a stream and automatically creates a supertable named `avg_vol`. The stream has a 1 minute time window that slides forward in 30 second intervals to calculate the average voltage of the meters supertable.
 
 ```sql
 CREATE STREAM avg_vol_s INTO avg_vol AS
 SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+
+CREATE STREAM streams0 INTO streamt0 AS
+SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname EVENT_WINDOW START WITH voltage < 0 END WITH voltage > 9;
+
+CREATE STREAM streams1 IGNORE EXPIRED 1 WATERMARK 100s INTO streamt1 AS
+SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname COUNT_WINDOW(10);
 ```
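For comparison with the stream examples above, the same window clauses can also be used in plain queries against the `meters` supertable described in the text; a minimal sketch (the exact output depends on the data present):

```sql
-- Count windows of 10 rows, sliding 5 rows at a time, per child table
SELECT _wstart, _wend, count(*), avg(voltage) FROM meters PARTITION BY tbname COUNT_WINDOW(10, 5);

-- Session windows that close after a 10-second gap without new data
SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname SESSION(ts, 10s);
```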
 
 ## Partitions of Stream
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
 
 import Release from "/components/ReleaseV3";
 
+## 3.2.3.0
+
+<Release type="tdengine" version="3.2.3.0" />
+
 ## 3.2.2.0
 
 <Release type="tdengine" version="3.2.2.0" />
@@ -856,7 +856,7 @@ The taosws `Consumer` API provides a WebSocket-based API for subscribing to TMQ data.
 ```python
 import taosws
 
-consumer = taosws.(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
+consumer = taosws.Consumer(conf={"group.id": "local", "td.connect.websocket.scheme": "ws"})
 ```
 
 </TabItem>
@@ -896,13 +896,13 @@ The Consumer API's `poll` method is used to consume data; `poll` accepts a float
 
 ```python
 while True:
-    res = consumer.poll(1)
-    if not res:
+    message = consumer.poll(1)
+    if not message:
         continue
-    err = res.error()
+    err = message.error()
     if err is not None:
         raise err
-    val = res.value()
+    val = message.value()
 
     for block in val:
         print(block.fetchall())
@@ -911,16 +911,14 @@ while True:
 </TabItem>
 <TabItem value="websocket" label="WebSocket connection">
 
-The Consumer API's `poll` method is used to consume data. `poll` accepts a timeout of type float, in seconds; before the timeout expires it returns one piece of data of type Message, and on timeout it returns `None`. The consumer must check the error information of the returned data through the Message's `error()` method.
+The Consumer API's `poll` method is used to consume data. `poll` accepts a timeout of type float, in seconds; before the timeout expires it returns one piece of data of type Message, and on timeout it returns `None`.
 
 ```python
 while True:
-    res = consumer.poll(timeout=1.0)
-    if not res:
+    message = consumer.poll(1)
+    if not message:
         continue
-    err = res.error()
-    if err is not None:
-        raise err
+
     for block in message:
         for row in block:
             print(row)
@@ -49,6 +49,7 @@ window_clause: {
 | STATE_WINDOW(col)
 | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [FILL(fill_mod_and_val)]
 | EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
+| COUNT_WINDOW(count_val[, sliding_val])
 }
 ```
 
@@ -180,6 +181,19 @@ select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c
 
 
 
+### Count Window
+
+The count window splits data into windows by a fixed number of rows. By default the data is sorted by timestamp, then divided into windows according to the value of count_val, and the aggregation is computed on each window. count_val is the maximum number of rows contained in each count window; when the total number of rows is not evenly divisible by count_val, the last window contains fewer than count_val rows. sliding_val is a constant that specifies how many rows the window slides by, similar to SLIDING of interval.
+
+Taking the following SQL statement as an example, the count windows are split as shown in the figure:
+```sql
+select _wstart, _wend, count(*) from t count_window(4);
+```
+
+
+
 
 ### Timestamp Pseudocolumns
 
 In the result of a window aggregate query, if the SQL statement does not specify a timestamp column in the output, the final result will not automatically contain the time information of the windows. To output the time window that an aggregate result corresponds to, use the timestamp-related pseudocolumns in the SELECT clause: the window start time (\_WSTART), the window end time (\_WEND) and the window duration (\_WDURATION), as well as the pseudocolumns related to the overall query window: the query window start time (\_QSTART) and the query window end time (\_QEND). Note that both the window start time and the window end time are closed bounds, and the window duration is a value in the current time resolution of the data. For example, if the time resolution of the database is milliseconds, then 500 in the result means the duration of that window is 500 milliseconds (500 ms).
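As a small sketch of the pseudocolumns described above, applied to the table `t` used in the count-window example:

```sql
-- Window start, end and duration for each 1-minute window over table t
SELECT _wstart, _wend, _wduration, count(*) FROM t INTERVAL(1m);
```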
@@ -49,10 +49,14 @@ window_clause: {
 SESSION(ts_col, tol_val)
 | STATE_WINDOW(col)
 | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)]
+| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
+| COUNT_WINDOW(count_val[, sliding_val])
 }
 ```
 
 Here, SESSION is a session window, and tol_val is the maximum gap of the time interval. Data within a gap of tol_val belongs to the same window; if the time between two consecutive rows exceeds tol_val, the next window is started automatically.
+EVENT_WINDOW is an event window, delimited by a start condition and an end condition. The window opens when start_trigger_condition is satisfied and closes when end_trigger_condition is satisfied. start_trigger_condition and end_trigger_condition can be any conditional expressions supported by TDengine and may involve different columns.
+COUNT_WINDOW is a counting window that splits data by a fixed number of rows. count_val is a constant positive integer, greater than or equal to 2 and less than 2147483648. count_val is the maximum number of rows contained in each COUNT_WINDOW; when the total number of rows is not evenly divisible by count_val, the last window contains fewer than count_val rows. sliding_val is a constant that specifies how many rows the window slides by, similar to SLIDING of INTERVAL.
 
 The window definitions are exactly the same as in the distinguished time-series queries; see [TDengine Distinguished Queries](../distinguished) for details.
 
@@ -61,6 +65,12 @@ window_clause: {
 ```sql
 CREATE STREAM avg_vol_s INTO avg_vol AS
 SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+
+CREATE STREAM streams0 INTO streamt0 AS
+SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname EVENT_WINDOW START WITH voltage < 0 END WITH voltage > 9;
+
+CREATE STREAM streams1 IGNORE EXPIRED 1 WATERMARK 100s INTO streamt1 AS
+SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname COUNT_WINDOW(10);
 ```
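Once the example streams above have been created and data is flowing into `meters`, their results accumulate in the destination supertables; a quick sketch of inspecting the output of the first stream:

```sql
-- Inspect the aggregated results written by the avg_vol_s stream
SELECT * FROM avg_vol LIMIT 10;
```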
 
 ## Partitions of Stream
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do
 
 import Release from "/components/ReleaseV3";
 
+## 3.2.3.0
+
+<Release type="tdengine" version="3.2.3.0" />
+
 ## 3.2.2.0
 
 <Release type="tdengine" version="3.2.2.0" />
@@ -219,7 +219,6 @@ extern bool tsFilterScalarMode;
 extern int32_t tsMaxStreamBackendCache;
 extern int32_t tsPQSortMemThreshold;
 extern int32_t tsResolveFQDNRetryTime;
-extern bool tsDisableCount;
 
 extern bool tsExperimental;
 // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)

@@ -234,10 +233,10 @@ int32_t taosCfgDynamicOptions(SConfig *pCfg, char *name, bool forServer);
 
 struct SConfig *taosGetCfg();
 
-void taosSetAllDebugFlag(int32_t flag);
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
-void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
-int8_t taosGranted(int8_t type);
+void taosSetGlobalDebugFlag(int32_t flag);
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
+void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
+int8_t taosGranted(int8_t type);
 
 #ifdef __cplusplus
 }
@ -581,8 +581,8 @@ typedef struct {
|
|||
};
|
||||
} SSubmitRsp;
|
||||
|
||||
int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
|
||||
int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
|
||||
// int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
|
||||
// int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
|
||||
// void tFreeSSubmitBlkRsp(void* param);
|
||||
void tFreeSSubmitRsp(SSubmitRsp* pRsp);
|
||||
|
||||
|
@ -885,8 +885,8 @@ typedef struct {
|
|||
int64_t maxStorage;
|
||||
} SCreateAcctReq, SAlterAcctReq;
|
||||
|
||||
int32_t tSerializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq);
|
||||
int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq);
|
||||
// int32_t tSerializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq);
|
||||
// int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
char user[TSDB_USER_LEN];
|
||||
|
@ -3446,7 +3446,7 @@ int32_t tDeserializeSCreateTagIdxReq(void* buf, int32_t bufLen, SCreateTagIndexR
|
|||
|
||||
typedef SMDropSmaReq SDropTagIndexReq;
|
||||
|
||||
int32_t tSerializeSDropTagIdxReq(void* buf, int32_t bufLen, SDropTagIndexReq* pReq);
|
||||
// int32_t tSerializeSDropTagIdxReq(void* buf, int32_t bufLen, SDropTagIndexReq* pReq);
|
||||
int32_t tDeserializeSDropTagIdxReq(void* buf, int32_t bufLen, SDropTagIndexReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
|
@ -3567,8 +3567,8 @@ typedef struct {
|
|||
int8_t igNotExists;
|
||||
} SMDropFullTextReq;
|
||||
|
||||
int32_t tSerializeSMDropFullTextReq(void* buf, int32_t bufLen, SMDropFullTextReq* pReq);
|
||||
int32_t tDeserializeSMDropFullTextReq(void* buf, int32_t bufLen, SMDropFullTextReq* pReq);
|
||||
// int32_t tSerializeSMDropFullTextReq(void* buf, int32_t bufLen, SMDropFullTextReq* pReq);
|
||||
// int32_t tDeserializeSMDropFullTextReq(void* buf, int32_t bufLen, SMDropFullTextReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
char indexFName[TSDB_INDEX_FNAME_LEN];
|
||||
|
@ -3820,6 +3820,7 @@ typedef struct {
|
|||
uint32_t phyLen;
|
||||
char* sql;
|
||||
char* msg;
|
||||
int8_t source;
|
||||
} SVDeleteReq;
|
||||
|
||||
int32_t tSerializeSVDeleteReq(void* buf, int32_t bufLen, SVDeleteReq* pReq);
|
||||
|
@ -3841,6 +3842,7 @@ typedef struct SDeleteRes {
|
|||
char tableFName[TSDB_TABLE_NAME_LEN];
|
||||
char tsColName[TSDB_COL_NAME_LEN];
|
||||
int64_t ctimeMs; // fill by vnode
|
||||
int8_t source;
|
||||
} SDeleteRes;
|
||||
|
||||
int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
|
||||
|
|
|
@@ -223,10 +223,10 @@ typedef struct SStoreTqReader {
 bool (*tqReaderCurrentBlockConsumed)();
 
 struct SWalReader* (*tqReaderGetWalReader)(); // todo remove it
-int32_t (*tqReaderRetrieveTaosXBlock)(); // todo remove it
+// int32_t (*tqReaderRetrieveTaosXBlock)(); // todo remove it
 
 int32_t (*tqReaderSetSubmitMsg)(); // todo remove it
-bool (*tqReaderNextBlockFilterOut)();
+// bool (*tqReaderNextBlockFilterOut)();
 } SStoreTqReader;
 
 typedef struct SStoreSnapshotFn {
@@ -78,6 +78,7 @@ typedef struct SSchedulerReq {
 void* chkKillParam;
 SExecResult* pExecRes;
 void** pFetchRes;
+int8_t source;
 } SSchedulerReq;
 
 int32_t schedulerInit(void);
@ -56,7 +56,6 @@ extern "C" {
|
|||
#define STREAM_EXEC_T_RESTART_ALL_TASKS (-4)
|
||||
#define STREAM_EXEC_T_STOP_ALL_TASKS (-5)
|
||||
#define STREAM_EXEC_T_RESUME_TASK (-6)
|
||||
#define STREAM_EXEC_T_UPDATE_TASK_EPSET (-7)
|
||||
|
||||
typedef struct SStreamTask SStreamTask;
|
||||
typedef struct SStreamQueue SStreamQueue;
|
||||
|
@ -783,11 +782,14 @@ bool streamTaskIsAllUpstreamClosed(SStreamTask* pTask);
|
|||
bool streamTaskSetSchedStatusWait(SStreamTask* pTask);
|
||||
int8_t streamTaskSetSchedStatusActive(SStreamTask* pTask);
|
||||
int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask);
|
||||
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t clearRelHalt, bool metaLock);
|
||||
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t clearRelHalt);
|
||||
|
||||
int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event);
|
||||
int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event);
|
||||
void streamTaskRestoreStatus(SStreamTask* pTask);
|
||||
|
||||
typedef int32_t (*__state_trans_user_fn)(SStreamTask*, void* param);
|
||||
int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, __state_trans_user_fn callbackFn, void* param);
|
||||
int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event, __state_trans_user_fn callbackFn, void* param);
|
||||
int32_t streamTaskRestoreStatus(SStreamTask* pTask);
|
||||
|
||||
int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp,
|
||||
SRpcHandleInfo* pRpcInfo, int32_t taskId);
|
||||
|
|
|
@@ -433,7 +433,7 @@ int32_t* taosGetErrno();
 
 //mnode-compact
 #define TSDB_CODE_MND_INVALID_COMPACT_ID TAOS_DEF_ERROR_CODE(0, 0x04B1)
-
+#define TSDB_CODE_MND_COMPACT_DETAIL_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x04B2)
 
 // vnode
 // #define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) // 2.x
@@ -187,6 +187,8 @@ typedef enum ELogicConditionType {
 LOGIC_COND_TYPE_NOT,
 } ELogicConditionType;
 
+#define TSDB_INT32_ID_LEN 11
+
 #define TSDB_NAME_DELIMITER_LEN 1
 
 #define TSDB_UNI_LEN 24
@ -72,40 +72,6 @@ struct STaosQnode {
|
|||
char item[];
|
||||
};
|
||||
|
||||
struct STaosQueue {
|
||||
STaosQnode *head;
|
||||
STaosQnode *tail;
|
||||
STaosQueue *next; // for queue set
|
||||
STaosQset *qset; // for queue set
|
||||
void *ahandle; // for queue set
|
||||
FItem itemFp;
|
||||
FItems itemsFp;
|
||||
TdThreadMutex mutex;
|
||||
int64_t memOfItems;
|
||||
int32_t numOfItems;
|
||||
int64_t threadId;
|
||||
int64_t memLimit;
|
||||
int64_t itemLimit;
|
||||
};
|
||||
|
||||
struct STaosQset {
|
||||
STaosQueue *head;
|
||||
STaosQueue *current;
|
||||
TdThreadMutex mutex;
|
||||
tsem_t sem;
|
||||
int32_t numOfQueues;
|
||||
int32_t numOfItems;
|
||||
};
|
||||
|
||||
struct STaosQall {
|
||||
STaosQnode *current;
|
||||
STaosQnode *start;
|
||||
int32_t numOfItems;
|
||||
int64_t memOfItems;
|
||||
int32_t unAccessedNumOfItems;
|
||||
int64_t unAccessMemOfItems;
|
||||
};
|
||||
|
||||
STaosQueue *taosOpenQueue();
|
||||
void taosCloseQueue(STaosQueue *queue);
|
||||
void taosSetQueueFp(STaosQueue *queue, FItem itemFp, FItems itemsFp);
|
||||
|
@ -140,6 +106,8 @@ int32_t taosGetQueueNumber(STaosQset *qset);
|
|||
int32_t taosReadQitemFromQset(STaosQset *qset, void **ppItem, SQueueInfo *qinfo);
|
||||
int32_t taosReadAllQitemsFromQset(STaosQset *qset, STaosQall *qall, SQueueInfo *qinfo);
|
||||
void taosResetQsetThread(STaosQset *qset, void *pItem);
|
||||
void taosQueueSetThreadId(STaosQueue *pQueue, int64_t threadId);
|
||||
int64_t taosQueueGetThreadId(STaosQueue *pQueue);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@@ -26,6 +26,8 @@ typedef struct SScalableBf {
 SArray *bfArray; // array of bloom filters
 uint32_t growth;
 uint64_t numBits;
+uint32_t maxBloomFilters;
+int8_t status;
 _hash_fn_t hashFn1;
 _hash_fn_t hashFn2;
 } SScalableBf;
@ -284,6 +284,7 @@ typedef struct SRequestObj {
|
|||
void* pWrapper;
|
||||
SMetaData parseMeta;
|
||||
char* effectiveUser;
|
||||
int8_t source;
|
||||
} SRequestObj;
|
||||
|
||||
typedef struct SSyncQueryParam {
|
||||
|
@ -306,10 +307,10 @@ void doFreeReqResultInfo(SReqResultInfo* pResInfo);
|
|||
int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq);
|
||||
void syncCatalogFn(SMetaData* pResult, void* param, int32_t code);
|
||||
|
||||
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly);
|
||||
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t source);
|
||||
TAOS_RES* taosQueryImplWithReqid(TAOS* taos, const char* sql, bool validateOnly, int64_t reqid);
|
||||
|
||||
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly);
|
||||
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly, int8_t source);
|
||||
void taosAsyncQueryImplWithReqid(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly,
|
||||
int64_t reqid);
|
||||
void taosAsyncFetchImpl(SRequestObj *pRequest, __taos_async_fn_t fp, void *param);
|
||||
|
@ -354,6 +355,7 @@ SRequestObj* acquireRequest(int64_t rid);
|
|||
int32_t releaseRequest(int64_t rid);
|
||||
int32_t removeRequest(int64_t rid);
|
||||
void doDestroyRequest(void* p);
|
||||
int64_t removeFromMostPrevReq(SRequestObj* pRequest);
|
||||
|
||||
char* getDbOfConnection(STscObj* pObj);
|
||||
void setConnectionDB(STscObj* pTscObj, const char* db);
|
||||
|
|
|
@@ -80,7 +80,7 @@ extern "C" {
 #define IS_SAME_KEY (maxKV->type == kv->type && maxKV->keyLen == kv->keyLen && memcmp(maxKV->key, kv->key, kv->keyLen) == 0)
 
 #define IS_SLASH_LETTER_IN_MEASUREMENT(sql) \
-(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE))
+(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE || *(sql) == SLASH))
 
 #define MOVE_FORWARD_ONE(sql, len) (memmove((void *)((sql)-1), (sql), len))
@ -385,6 +385,33 @@ int32_t releaseRequest(int64_t rid) { return taosReleaseRef(clientReqRefPool, ri
|
|||
|
||||
int32_t removeRequest(int64_t rid) { return taosRemoveRef(clientReqRefPool, rid); }
|
||||
|
||||
/// return the most previous req ref id
|
||||
int64_t removeFromMostPrevReq(SRequestObj* pRequest) {
|
||||
int64_t mostPrevReqRefId = pRequest->self;
|
||||
SRequestObj* pTmp = pRequest;
|
||||
while (pTmp->relation.prevRefId) {
|
||||
pTmp = acquireRequest(pTmp->relation.prevRefId);
|
||||
if (pTmp) {
|
||||
mostPrevReqRefId = pTmp->self;
|
||||
releaseRequest(mostPrevReqRefId);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
removeRequest(mostPrevReqRefId);
|
||||
return mostPrevReqRefId;
|
||||
}
|
||||
|
||||
void destroyNextReq(int64_t nextRefId) {
|
||||
if (nextRefId) {
|
||||
SRequestObj* pObj = acquireRequest(nextRefId);
|
||||
if (pObj) {
|
||||
releaseRequest(nextRefId);
|
||||
releaseRequest(nextRefId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void destroySubRequests(SRequestObj *pRequest) {
|
||||
int32_t reqIdx = -1;
|
||||
SRequestObj *pReqList[16] = {NULL};
|
||||
|
@ -435,7 +462,7 @@ void doDestroyRequest(void *p) {
|
|||
uint64_t reqId = pRequest->requestId;
|
||||
tscTrace("begin to destroy request %" PRIx64 " p:%p", reqId, pRequest);
|
||||
|
||||
destroySubRequests(pRequest);
|
||||
int64_t nextReqRefId = pRequest->relation.nextRefId;
|
||||
|
||||
taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self));
|
||||
|
||||
|
@ -471,6 +498,7 @@ void doDestroyRequest(void *p) {
|
|||
taosMemoryFreeClear(pRequest->sqlstr);
|
||||
taosMemoryFree(pRequest);
|
||||
tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest);
|
||||
destroyNextReq(nextReqRefId);
|
||||
}
|
||||
|
||||
void destroyRequest(SRequestObj *pRequest) {
|
||||
|
@ -479,7 +507,7 @@ void destroyRequest(SRequestObj *pRequest) {
|
|||
}
|
||||
|
||||
taos_stop_query(pRequest);
|
||||
removeRequest(pRequest->self);
|
||||
removeFromMostPrevReq(pRequest);
|
||||
}
|
||||
|
||||
void taosStopQueryImpl(SRequestObj *pRequest) {
|
||||
|
|
|
@ -743,6 +743,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
|
|||
.chkKillFp = chkRequestKilled,
|
||||
.chkKillParam = (void*)pRequest->self,
|
||||
.pExecRes = &res,
|
||||
.source = pRequest->source,
|
||||
};
|
||||
|
||||
int32_t code = schedulerExecJob(&req, &pRequest->body.queryJob);
|
||||
|
@ -1212,6 +1213,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
|
|||
.chkKillFp = chkRequestKilled,
|
||||
.chkKillParam = (void*)pRequest->self,
|
||||
.pExecRes = NULL,
|
||||
.source = pRequest->source,
|
||||
};
|
||||
code = schedulerExecJob(&req, &pRequest->body.queryJob);
|
||||
taosArrayDestroy(pNodeList);
|
||||
|
@ -2475,7 +2477,7 @@ void syncQueryFn(void* param, void* res, int32_t code) {
|
|||
tsem_post(&pParam->sem);
|
||||
}
|
||||
|
||||
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly) {
|
||||
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly, int8_t source) {
|
||||
if (sql == NULL || NULL == fp) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
if (fp) {
|
||||
|
@ -2501,6 +2503,7 @@ void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp,
|
|||
return;
|
||||
}
|
||||
|
||||
pRequest->source = source;
|
||||
pRequest->body.queryFp = fp;
|
||||
doAsyncQuery(pRequest, false);
|
||||
}
|
||||
|
@ -2535,7 +2538,7 @@ void taosAsyncQueryImplWithReqid(uint64_t connId, const char* sql, __taos_async_
|
|||
doAsyncQuery(pRequest, false);
|
||||
}
|
||||
|
||||
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
|
||||
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t source) {
|
||||
if (NULL == taos) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return NULL;
|
||||
|
@ -2550,7 +2553,7 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
|
|||
}
|
||||
tsem_init(¶m->sem, 0, 0);
|
||||
|
||||
taosAsyncQueryImpl(*(int64_t*)taos, sql, syncQueryFn, param, validateOnly);
|
||||
taosAsyncQueryImpl(*(int64_t*)taos, sql, syncQueryFn, param, validateOnly, source);
|
||||
tsem_wait(¶m->sem);
|
||||
|
||||
SRequestObj* pRequest = NULL;
|
||||
|
|
|
@ -402,7 +402,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
|
|||
return pResInfo->userFields;
|
||||
}
|
||||
|
||||
TAOS_RES *taos_query(TAOS *taos, const char *sql) { return taosQueryImpl(taos, sql, false); }
|
||||
TAOS_RES *taos_query(TAOS *taos, const char *sql) { return taosQueryImpl(taos, sql, false, TD_REQ_FROM_APP); }
|
||||
TAOS_RES *taos_query_with_reqid(TAOS *taos, const char *sql, int64_t reqid) {
|
||||
return taosQueryImplWithReqid(taos, sql, false, reqid);
|
||||
}
|
||||
|
@ -828,7 +828,7 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) {
|
|||
}
|
||||
|
||||
int taos_validate_sql(TAOS *taos, const char *sql) {
|
||||
TAOS_RES *pObj = taosQueryImpl(taos, sql, true);
|
||||
TAOS_RES *pObj = taosQueryImpl(taos, sql, true, TD_REQ_FROM_APP);
|
||||
|
||||
int code = taos_errno(pObj);
|
||||
|
||||
|
@ -1126,7 +1126,7 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
|
|||
void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) {
|
||||
int64_t connId = *(int64_t *)taos;
|
||||
tscDebug("taos_query_a start with sql:%s", sql);
|
||||
taosAsyncQueryImpl(connId, sql, fp, param, false);
|
||||
taosAsyncQueryImpl(connId, sql, fp, param, false, TD_REQ_FROM_APP);
|
||||
tscDebug("taos_query_a end with sql:%s", sql);
|
||||
}
|
||||
|
||||
|
@ -1254,54 +1254,34 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
|
|||
}
|
||||
|
||||
void restartAsyncQuery(SRequestObj *pRequest, int32_t code) {
|
||||
int32_t reqIdx = 0;
|
||||
SRequestObj *pReqList[16] = {NULL};
|
||||
SRequestObj *pUserReq = NULL;
|
||||
pReqList[0] = pRequest;
|
||||
uint64_t tmpRefId = 0;
|
||||
SRequestObj *pTmp = pRequest;
|
||||
while (pTmp->relation.prevRefId) {
|
||||
tmpRefId = pTmp->relation.prevRefId;
|
||||
pTmp = acquireRequest(tmpRefId);
|
||||
if (pTmp) {
|
||||
pReqList[++reqIdx] = pTmp;
|
||||
releaseRequest(tmpRefId);
|
||||
} else {
|
||||
tscError("prev req ref 0x%" PRIx64 " is not there", tmpRefId);
|
||||
tscInfo("restart request: %s p: %p", pRequest->sqlstr, pRequest);
|
||||
SRequestObj* pUserReq = pRequest;
|
||||
acquireRequest(pRequest->self);
|
||||
while (pUserReq) {
|
||||
if (pUserReq->self == pUserReq->relation.userRefId || pUserReq->relation.userRefId == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
tmpRefId = pRequest->relation.nextRefId;
|
||||
while (tmpRefId) {
|
||||
pTmp = acquireRequest(tmpRefId);
|
||||
if (pTmp) {
|
||||
tmpRefId = pTmp->relation.nextRefId;
|
||||
removeRequest(pTmp->self);
|
||||
releaseRequest(pTmp->self);
|
||||
} else {
|
||||
tscError("next req ref 0x%" PRIx64 " is not there", tmpRefId);
|
||||
break;
|
||||
int64_t nextRefId = pUserReq->relation.nextRefId;
|
||||
releaseRequest(pUserReq->self);
|
||||
if (nextRefId) {
|
||||
pUserReq = acquireRequest(nextRefId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (int32_t i = reqIdx; i >= 0; i--) {
|
||||
destroyCtxInRequest(pReqList[i]);
|
||||
if (pReqList[i]->relation.userRefId == pReqList[i]->self || 0 == pReqList[i]->relation.userRefId) {
|
||||
pUserReq = pReqList[i];
|
||||
} else {
|
||||
removeRequest(pReqList[i]->self);
|
||||
}
|
||||
}
|
||||
|
||||
bool hasSubRequest = pUserReq != pRequest || pRequest->relation.prevRefId != 0;
|
||||
if (pUserReq) {
|
||||
destroyCtxInRequest(pUserReq);
|
||||
pUserReq->prevCode = code;
|
||||
memset(&pUserReq->relation, 0, sizeof(pUserReq->relation));
|
||||
} else {
|
||||
tscError("user req is missing");
|
||||
tscError("User req is missing");
|
||||
removeFromMostPrevReq(pRequest);
|
||||
return;
|
||||
}
|
||||
|
||||
if (hasSubRequest)
|
||||
removeFromMostPrevReq(pRequest);
|
||||
else
|
||||
releaseRequest(pUserReq->self);
|
||||
doAsyncQuery(pUserReq, true);
|
||||
}
|
||||
|
||||
|
|
|
@@ -1256,7 +1256,7 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
 snprintf(sql, sizeof(sql), "delete from `%s` where `%s` >= %" PRId64 " and `%s` <= %" PRId64, req.tableFName,
 req.tsColName, req.skey, req.tsColName, req.ekey);
 
-TAOS_RES* res = taos_query(taos, sql);
+TAOS_RES* res = taosQueryImpl(taos, sql, false, TD_REQ_FROM_TAOX);
 SRequestObj* pRequest = (SRequestObj*)res;
 code = pRequest->code;
 if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_PAR_GET_META_ERROR) {
@@ -20,14 +20,14 @@
 
 #include "clientSml.h"
 
-#define IS_COMMA(sql) (*(sql) == COMMA && *((sql)-1) != SLASH)
-#define IS_SPACE(sql) (*(sql) == SPACE && *((sql)-1) != SLASH)
-#define IS_EQUAL(sql) (*(sql) == EQUAL && *((sql)-1) != SLASH)
+#define IS_COMMA(sql,escapeChar) (*(sql) == COMMA && (*((sql)-1) != SLASH || ((sql)-1 == escapeChar)))
+#define IS_SPACE(sql,escapeChar) (*(sql) == SPACE && (*((sql)-1) != SLASH || ((sql)-1 == escapeChar)))
+#define IS_EQUAL(sql,escapeChar) (*(sql) == EQUAL && (*((sql)-1) != SLASH || ((sql)-1 == escapeChar)))
 
 #define IS_SLASH_LETTER_IN_FIELD_VALUE(sql) (*((sql)-1) == SLASH && (*(sql) == QUOTE || *(sql) == SLASH))
 
 #define IS_SLASH_LETTER_IN_TAG_FIELD_KEY(sql) \
-(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE || *(sql) == EQUAL))
+(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE || *(sql) == EQUAL || *(sql) == SLASH))
 
 #define PROCESS_SLASH_IN_FIELD_VALUE(key, keyLen) \
 for (int i = 1; i < keyLen; ++i) { \
|
@ -198,7 +198,7 @@ static int32_t smlProcessTagLine(SSmlHandle *info, char **sql, char *sqlEnd){
|
|||
int cnt = 0;
|
||||
|
||||
while (*sql < sqlEnd) {
|
||||
if (unlikely(IS_SPACE(*sql))) {
|
||||
if (unlikely(IS_SPACE(*sql,NULL))) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -207,18 +207,21 @@ static int32_t smlProcessTagLine(SSmlHandle *info, char **sql, char *sqlEnd){
|
|||
size_t keyLen = 0;
|
||||
bool keyEscaped = false;
|
||||
size_t keyLenEscaped = 0;
|
||||
const char *escapeChar = NULL;
|
||||
|
||||
while (*sql < sqlEnd) {
|
||||
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
|
||||
if (unlikely(IS_SPACE(*sql,escapeChar) || IS_COMMA(*sql,escapeChar))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
|
||||
terrno = TSDB_CODE_SML_INVALID_DATA;
|
||||
return -1;
|
||||
}
|
||||
if (unlikely(IS_EQUAL(*sql))) {
|
||||
if (unlikely(IS_EQUAL(*sql,escapeChar))) {
|
||||
keyLen = *sql - key;
|
||||
(*sql)++;
|
||||
break;
|
||||
}
|
||||
if (IS_SLASH_LETTER_IN_TAG_FIELD_KEY(*sql)) {
|
||||
escapeChar = *sql;
|
||||
keyLenEscaped++;
|
||||
keyEscaped = true;
|
||||
}
|
||||
|
@ -238,15 +241,16 @@ static int32_t smlProcessTagLine(SSmlHandle *info, char **sql, char *sqlEnd){
|
|||
size_t valueLenEscaped = 0;
|
||||
while (*sql < sqlEnd) {
|
||||
// parse value
|
||||
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
|
||||
if (unlikely(IS_SPACE(*sql,escapeChar) || IS_COMMA(*sql,escapeChar))) {
|
||||
break;
|
||||
} else if (unlikely(IS_EQUAL(*sql))) {
|
||||
} else if (unlikely(IS_EQUAL(*sql,escapeChar))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
|
||||
terrno = TSDB_CODE_SML_INVALID_DATA;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (IS_SLASH_LETTER_IN_TAG_FIELD_KEY(*sql)) {
|
||||
escapeChar = *sql;
|
||||
valueLenEscaped++;
|
||||
valueEscaped = true;
|
||||
}
|
||||
|
@ -293,7 +297,7 @@ static int32_t smlProcessTagLine(SSmlHandle *info, char **sql, char *sqlEnd){
|
|||
}
|
||||
|
||||
cnt++;
|
||||
if (IS_SPACE(*sql)) {
|
||||
if (IS_SPACE(*sql,escapeChar)) {
|
||||
break;
|
||||
}
|
||||
(*sql)++;
|
||||
|
@ -326,7 +330,7 @@ static int32_t smlParseTagLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
|
|||
static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLineInfo *currElement) {
|
||||
int cnt = 0;
|
||||
while (*sql < sqlEnd) {
|
||||
if (unlikely(IS_SPACE(*sql))) {
|
||||
if (unlikely(IS_SPACE(*sql,NULL))) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -335,17 +339,19 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
|
|||
size_t keyLen = 0;
|
||||
bool keyEscaped = false;
|
||||
size_t keyLenEscaped = 0;
|
||||
const char *escapeChar = NULL;
|
||||
while (*sql < sqlEnd) {
|
||||
if (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql))) {
|
||||
if (unlikely(IS_SPACE(*sql,escapeChar) || IS_COMMA(*sql,escapeChar))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "invalid data", *sql);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
if (unlikely(IS_EQUAL(*sql))) {
|
||||
if (unlikely(IS_EQUAL(*sql,escapeChar))) {
|
||||
keyLen = *sql - key;
|
||||
(*sql)++;
|
||||
break;
|
||||
}
|
||||
if (IS_SLASH_LETTER_IN_TAG_FIELD_KEY(*sql)) {
|
||||
escapeChar = *sql;
|
||||
keyLenEscaped++;
|
||||
keyEscaped = true;
|
||||
}
|
||||
|
@ -363,7 +369,6 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
|
|||
bool valueEscaped = false;
|
||||
size_t valueLenEscaped = 0;
|
||||
int quoteNum = 0;
|
||||
const char *escapeChar = NULL;
|
||||
while (*sql < sqlEnd) {
|
||||
// parse value
|
||||
if (unlikely(*(*sql) == QUOTE && (*(*sql - 1) != SLASH || (*sql - 1) == escapeChar))) {
|
||||
|
@ -374,7 +379,7 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
|
|||
}
|
||||
continue;
|
||||
}
|
||||
if (quoteNum % 2 == 0 && (unlikely(IS_SPACE(*sql) || IS_COMMA(*sql)))) {
|
||||
if (quoteNum % 2 == 0 && (unlikely(IS_SPACE(*sql,escapeChar) || IS_COMMA(*sql,escapeChar)))) {
|
||||
break;
|
||||
}
|
||||
if (IS_SLASH_LETTER_IN_FIELD_VALUE(*sql) && (*sql - 1) != escapeChar) {
|
||||
|
@ -437,7 +442,7 @@ static int32_t smlParseColLine(SSmlHandle *info, char **sql, char *sqlEnd, SSmlL
|
|||
}
|
||||
|
||||
cnt++;
|
||||
if (IS_SPACE(*sql)) {
|
||||
if (IS_SPACE(*sql,escapeChar)) {
|
||||
break;
|
||||
}
|
||||
(*sql)++;
|
||||
|
@ -453,19 +458,18 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
|
|||
elements->measure = sql;
|
||||
// parse measure
|
||||
size_t measureLenEscaped = 0;
|
||||
const char *escapeChar = NULL;
|
||||
while (sql < sqlEnd) {
|
||||
if (unlikely((sql != elements->measure) && IS_SLASH_LETTER_IN_MEASUREMENT(sql))) {
|
||||
elements->measureEscaped = true;
|
||||
measureLenEscaped++;
|
||||
sql++;
|
||||
continue;
|
||||
}
|
||||
if (unlikely(IS_COMMA(sql))) {
|
||||
if (unlikely(IS_COMMA(sql,escapeChar) || IS_SPACE(sql,escapeChar))) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (unlikely(IS_SPACE(sql))) {
|
||||
break;
|
||||
if (unlikely((sql != elements->measure) && IS_SLASH_LETTER_IN_MEASUREMENT(sql))) {
|
||||
elements->measureEscaped = true;
|
||||
escapeChar = sql;
|
||||
measureLenEscaped++;
|
||||
sql++;
|
||||
continue;
|
||||
}
|
||||
sql++;
|
||||
}
|
||||
|
@ -478,9 +482,12 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
|
|||
// to get measureTagsLen before
|
||||
const char *tmp = sql;
|
||||
while (tmp < sqlEnd) {
|
||||
if (unlikely(IS_SPACE(tmp))) {
|
||||
if (unlikely(IS_SPACE(tmp,escapeChar))) {
|
||||
break;
|
||||
}
|
||||
if(unlikely(IS_SLASH_LETTER_IN_TAG_FIELD_KEY(tmp))){
|
||||
escapeChar = tmp;
|
||||
}
|
||||
tmp++;
|
||||
}
|
||||
elements->measureTagsLen = tmp - elements->measure;
|
||||
|
|
|
@ -876,12 +876,13 @@ int32_t tmqHandleAllDelayedTask(tmq_t* pTmq) {
|
|||
STaosQall* qall = taosAllocateQall();
|
||||
taosReadAllQitems(pTmq->delayedTask, qall);
|
||||
|
||||
if (qall->numOfItems == 0) {
|
||||
int32_t numOfItems = taosQallItemSize(qall);
|
||||
if (numOfItems == 0) {
|
||||
taosFreeQall(qall);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
tscDebug("consumer:0x%" PRIx64 " handle delayed %d tasks before poll data", pTmq->consumerId, qall->numOfItems);
|
||||
tscDebug("consumer:0x%" PRIx64 " handle delayed %d tasks before poll data", pTmq->consumerId, numOfItems);
|
||||
int8_t* pTaskType = NULL;
|
||||
taosGetQitem(qall, (void**)&pTaskType);
|
||||
|
||||
|
@ -1009,19 +1010,8 @@ int32_t tmq_unsubscribe(tmq_t* tmq) {
|
|||
}
|
||||
taosSsleep(2); // sleep 2s for hb to send offset and rows to server
|
||||
|
||||
int32_t rsp;
|
||||
int32_t retryCnt = 0;
|
||||
tmq_list_t* lst = tmq_list_new();
|
||||
while (1) {
|
||||
rsp = tmq_subscribe(tmq, lst);
|
||||
if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) {
|
||||
break;
|
||||
} else {
|
||||
retryCnt++;
|
||||
taosMsleep(500);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t rsp = tmq_subscribe(tmq, lst);
|
||||
tmq_list_destroy(lst);
|
||||
return rsp;
|
||||
}
|
||||
|
@ -1271,10 +1261,9 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
|
|||
}
|
||||
|
||||
int32_t retryCnt = 0;
|
||||
while (syncAskEp(tmq) != 0) {
|
||||
if (retryCnt++ > MAX_RETRY_COUNT) {
|
||||
while ((code = syncAskEp(tmq)) != 0) {
|
||||
if (retryCnt++ > MAX_RETRY_COUNT || code == TSDB_CODE_MND_CONSUMER_NOT_EXIST) {
|
||||
tscError("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry more than 2 minutes", tmq->consumerId);
|
||||
code = TSDB_CODE_MND_CONSUMER_NOT_READY;
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
|
@ -1839,7 +1828,7 @@ static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal
|
|||
}
|
||||
|
||||
static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) {
|
||||
tscDebug("consumer:0x%" PRIx64 " start to handle the rsp, total:%d", tmq->consumerId, tmq->qall->numOfItems);
|
||||
tscDebug("consumer:0x%" PRIx64 " start to handle the rsp, total:%d", tmq->consumerId, taosQallItemSize(tmq->qall));
|
||||
|
||||
while (1) {
|
||||
SMqRspWrapper* pRspWrapper = NULL;
|
||||
|
@ -2147,26 +2136,19 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
|
|||
if (tmq->status == TMQ_CONSUMER_STATUS__READY) {
|
||||
// if auto commit is set, commit before close consumer. Otherwise, do nothing.
|
||||
if (tmq->autoCommit) {
|
||||
int32_t rsp = tmq_commit_sync(tmq, NULL);
|
||||
if (rsp != 0) {
|
||||
return rsp;
|
||||
int32_t code = tmq_commit_sync(tmq, NULL);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
}
|
||||
taosSsleep(2); // sleep 2s for hb to send offset and rows to server
|
||||
|
||||
int32_t retryCnt = 0;
|
||||
tmq_list_t* lst = tmq_list_new();
|
||||
while (1) {
|
||||
int32_t rsp = tmq_subscribe(tmq, lst);
|
||||
if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) {
|
||||
break;
|
||||
} else {
|
||||
retryCnt++;
|
||||
taosMsleep(500);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t code = tmq_subscribe(tmq, lst);
|
||||
tmq_list_destroy(lst);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
} else {
|
||||
tscInfo("consumer:0x%" PRIx64 " not in ready state, close it directly", tmq->consumerId);
|
||||
}
|
||||
|
|
|
@ -452,20 +452,21 @@ int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* p
|
|||
}
|
||||
|
||||
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||
int32_t newLen = pSource->varmeta.length;
|
||||
memcpy(pColumnInfoData->varmeta.offset, pSource->varmeta.offset, sizeof(int32_t) * numOfRows);
|
||||
if (pColumnInfoData->varmeta.allocLen < pSource->varmeta.length) {
|
||||
char* tmp = taosMemoryRealloc(pColumnInfoData->pData, pSource->varmeta.length);
|
||||
if (pColumnInfoData->varmeta.allocLen < newLen) {
|
||||
char* tmp = taosMemoryRealloc(pColumnInfoData->pData, newLen);
|
||||
if (tmp == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
pColumnInfoData->pData = tmp;
|
||||
pColumnInfoData->varmeta.allocLen = pSource->varmeta.length;
|
||||
pColumnInfoData->varmeta.allocLen = newLen;
|
||||
}
|
||||
|
||||
pColumnInfoData->varmeta.length = pSource->varmeta.length;
|
||||
pColumnInfoData->varmeta.length = newLen;
|
||||
if (pColumnInfoData->pData != NULL && pSource->pData != NULL) {
|
||||
memcpy(pColumnInfoData->pData, pSource->pData, pSource->varmeta.length);
|
||||
memcpy(pColumnInfoData->pData, pSource->pData, newLen);
|
||||
}
|
||||
} else {
|
||||
memcpy(pColumnInfoData->nullbitmap, pSource->nullbitmap, BitmapLen(numOfRows));
|
||||
|
@ -1687,7 +1688,29 @@ int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n) {
|
|||
}
|
||||
|
||||
static void colDataKeepFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
|
||||
if (n >= total || n == 0) return;
|
||||
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
|
||||
if (pColInfoData->varmeta.length != 0) {
|
||||
int32_t newLen = pColInfoData->varmeta.offset[n];
|
||||
if (-1 == newLen) {
|
||||
for (int i = n - 1; i >= 0; --i) {
|
||||
newLen = pColInfoData->varmeta.offset[i];
|
||||
if (newLen != -1) {
|
||||
if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) {
|
||||
newLen += getJsonValueLen(pColInfoData->pData + newLen);
|
||||
} else {
|
||||
newLen += varDataTLen(pColInfoData->pData + newLen);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (newLen <= -1) {
|
||||
uFatal("colDataKeepFirstNRows: newLen:%d old:%d", newLen, pColInfoData->varmeta.length);
|
||||
} else {
|
||||
pColInfoData->varmeta.length = newLen;
|
||||
}
|
||||
}
|
||||
// pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, 0, n);
|
||||
memset(&pColInfoData->varmeta.offset[n], 0, total - n);
|
||||
}
|
||||
|
|
|
@@ -58,7 +58,7 @@ int32_t tsNumOfMnodeQueryThreads = 4;
 int32_t tsNumOfMnodeFetchThreads = 1;
 int32_t tsNumOfMnodeReadThreads = 1;
 int32_t tsNumOfVnodeQueryThreads = 4;
-float tsRatioOfVnodeStreamThreads = 1.5F;
+float tsRatioOfVnodeStreamThreads = 0.5F;
 int32_t tsNumOfVnodeFetchThreads = 4;
 int32_t tsNumOfVnodeRsmaThreads = 2;
 int32_t tsNumOfQnodeQueryThreads = 4;
|
@ -269,7 +269,6 @@ int64_t tsStreamBufferSize = 128 * 1024 * 1024;
|
|||
bool tsFilterScalarMode = false;
|
||||
int tsResolveFQDNRetryTime = 100; // seconds
|
||||
int tsStreamAggCnt = 1000;
|
||||
bool tsDisableCount = true;
|
||||
|
||||
char tsS3Endpoint[TSDB_FQDN_LEN] = "<endpoint>";
|
||||
char tsS3AccessKey[TSDB_FQDN_LEN] = "<accesskey>";
|
||||
|
@ -541,8 +540,6 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
|
|||
|
||||
if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, CFG_SCOPE_SERVER, CFG_DYN_SERVER) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
|
||||
|
||||
if (cfgAddBool(pCfg, "disableCount", tsDisableCount, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT) != 0) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -589,7 +586,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
|
||||
tsNumOfSupportVnodes = tsNumOfCores * 2;
|
||||
tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2);
|
||||
if (cfgAddInt32(pCfg, "supportVnodes", tsNumOfSupportVnodes, 0, 4096, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "supportVnodes", tsNumOfSupportVnodes, 0, 4096, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1;
|
||||
|
||||
if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT, CFG_DYN_CLIENT) != 0)
|
||||
|
@ -705,7 +702,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
if (cfgAddInt32(pCfg, "monitorIntervalForBasic", tsMonitorIntervalForBasic, 1, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0)
|
||||
return -1;
|
||||
if (cfgAddBool(pCfg, "monitorForceV2", tsMonitorForceV2, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
|
||||
|
||||
|
||||
if (cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) return -1;
|
||||
|
@ -1109,8 +1106,6 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
|
|||
tsKeepAliveIdle = cfgGetItem(pCfg, "keepAliveIdle")->i32;
|
||||
|
||||
tsExperimental = cfgGetItem(pCfg, "experimental")->bval;
|
||||
|
||||
tsDisableCount = cfgGetItem(pCfg, "disableCount")->bval;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1174,7 +1169,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
|||
tsMonitorLogProtocol = cfgGetItem(pCfg, "monitorLogProtocol")->bval;
|
||||
tsMonitorIntervalForBasic = cfgGetItem(pCfg, "monitorIntervalForBasic")->i32;
|
||||
tsMonitorForceV2 = cfgGetItem(pCfg, "monitorForceV2")->i32;
|
||||
|
||||
|
||||
tsEnableAudit = cfgGetItem(pCfg, "audit")->bval;
|
||||
tsEnableAuditCreateTable = cfgGetItem(pCfg, "auditCreateTable")->bval;
|
||||
tsAuditInterval = cfgGetItem(pCfg, "auditInterval")->i32;
|
||||
|
@ -1263,6 +1258,8 @@ static int32_t taosSetReleaseCfg(SConfig *pCfg) { return 0; }
|
|||
int32_t taosSetReleaseCfg(SConfig *pCfg);
|
||||
#endif
|
||||
|
||||
static void taosSetAllDebugFlag(SConfig *pCfg, int32_t flag);
|
||||
|
||||
int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd,
|
||||
const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc) {
|
||||
if (tsCfg == NULL) osDefaultInit();
|
||||
|
@ -1307,7 +1304,7 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
|
|||
taosSetServerLogCfg(pCfg);
|
||||
}
|
||||
|
||||
taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32);
|
||||
taosSetAllDebugFlag(pCfg, cfgGetItem(pCfg, "debugFlag")->i32);
|
||||
|
||||
if (taosMulModeMkDir(tsLogDir, 0777, true) != 0) {
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
|
@ -1356,6 +1353,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
|
|||
if (taosAddClientLogCfg(tsCfg) != 0) return -1;
|
||||
if (taosAddServerLogCfg(tsCfg) != 0) return -1;
|
||||
}
|
||||
|
||||
taosAddSystemCfg(tsCfg);
|
||||
|
||||
if (taosLoadCfg(tsCfg, envCmd, cfgDir, envFile, apolloUrl) != 0) {
|
||||
|
@ -1382,10 +1380,12 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
|
|||
if (taosSetTfsCfg(tsCfg) != 0) return -1;
|
||||
if (taosSetS3Cfg(tsCfg) != 0) return -1;
|
||||
}
|
||||
|
||||
taosSetSystemCfg(tsCfg);
|
||||
|
||||
if (taosSetFileHandlesLimit() != 0) return -1;
|
||||
|
||||
taosSetAllDebugFlag(cfgGetItem(tsCfg, "debugFlag")->i32);
|
||||
taosSetAllDebugFlag(tsCfg, cfgGetItem(tsCfg, "debugFlag")->i32);
|
||||
|
||||
cfgDumpCfg(tsCfg, tsc, false);
|
||||
|
||||
|
@ -1478,7 +1478,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, char *name) {
|
|||
}
|
||||
|
||||
if (strncasecmp(name, "debugFlag", 9) == 0) {
|
||||
taosSetAllDebugFlag(pItem->i32);
|
||||
taosSetAllDebugFlag(pCfg, pItem->i32);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1552,7 +1552,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
switch (lowcaseName[0]) {
|
||||
case 'd': {
|
||||
if (strcasecmp("debugFlag", name) == 0) {
|
||||
taosSetAllDebugFlag(pItem->i32);
|
||||
taosSetAllDebugFlag(pCfg, pItem->i32);
|
||||
matched = true;
|
||||
}
|
||||
break;
|
||||
|
@ -1737,8 +1737,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
|
|||
{"shellActivityTimer", &tsShellActivityTimer},
|
||||
{"slowLogThreshold", &tsSlowLogThreshold},
|
||||
{"useAdapter", &tsUseAdapter},
|
||||
{"experimental", &tsExperimental},
|
||||
{"disableCount", &tsDisableCount}};
|
||||
{"experimental", &tsExperimental}};
|
||||
|
||||
if (taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true) != 0) {
|
||||
taosCfgSetOption(options, tListLen(options), pItem, false);
|
||||
|
@ -1777,11 +1776,13 @@ static void taosCheckAndSetDebugFlag(int32_t *pFlagPtr, char *name, int32_t flag
|
|||
taosSetDebugFlag(pFlagPtr, name, flag);
|
||||
}
|
||||
|
||||
void taosSetAllDebugFlag(int32_t flag) {
|
||||
void taosSetGlobalDebugFlag(int32_t flag) { taosSetAllDebugFlag(tsCfg, flag); }
|
||||
|
||||
static void taosSetAllDebugFlag(SConfig *pCfg, int32_t flag) {
|
||||
if (flag <= 0) return;
|
||||
|
||||
SArray *noNeedToSetVars = NULL;
|
||||
SConfigItem *pItem = cfgGetItem(tsCfg, "debugFlag");
|
||||
SConfigItem *pItem = cfgGetItem(pCfg, "debugFlag");
|
||||
if (pItem != NULL) {
|
||||
pItem->i32 = flag;
|
||||
noNeedToSetVars = pItem->array;
|
||||
|
@ -1831,4 +1832,4 @@ int8_t taosGranted(int8_t type) {
|
|||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1009,19 +1009,19 @@ int32_t tDeserializeSCreateTagIdxReq(void *buf, int32_t bufLen, SCreateTagIndexR
|
|||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
int32_t tSerializeSDropTagIdxReq(void *buf, int32_t bufLen, SDropTagIndexReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
tEndEncode(&encoder);
|
||||
// int32_t tSerializeSDropTagIdxReq(void *buf, int32_t bufLen, SDropTagIndexReq *pReq) {
|
||||
// SEncoder encoder = {0};
|
||||
// tEncoderInit(&encoder, buf, bufLen);
|
||||
// if (tStartEncode(&encoder) < 0) return -1;
|
||||
// tEndEncode(&encoder);
|
||||
|
||||
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
|
||||
if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
|
||||
// if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
|
||||
// if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
tEncoderClear(&encoder);
|
||||
return tlen;
|
||||
}
|
||||
// int32_t tlen = encoder.pos;
|
||||
// tEncoderClear(&encoder);
|
||||
// return tlen;
|
||||
// }
|
||||
int32_t tDeserializeSDropTagIdxReq(void *buf, int32_t bufLen, SDropTagIndexReq *pReq) {
|
||||
SDecoder decoder = {0};
|
||||
tDecoderInit(&decoder, buf, bufLen);
|
||||
|
@ -1035,6 +1035,7 @@ int32_t tDeserializeSDropTagIdxReq(void *buf, int32_t bufLen, SDropTagIndexReq *
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tSerializeSMCreateFullTextReq(void *buf, int32_t bufLen, SMCreateFullTextReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
|
@ -1059,32 +1060,32 @@ void tFreeSMCreateFullTextReq(SMCreateFullTextReq *pReq) {
|
|||
// impl later
|
||||
return;
|
||||
}
|
||||
int32_t tSerializeSMDropFullTextReq(void *buf, int32_t bufLen, SMDropFullTextReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
// int32_t tSerializeSMDropFullTextReq(void *buf, int32_t bufLen, SMDropFullTextReq *pReq) {
|
||||
// SEncoder encoder = {0};
|
||||
// tEncoderInit(&encoder, buf, bufLen);
|
||||
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
// if (tStartEncode(&encoder) < 0) return -1;
|
||||
|
||||
if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
|
||||
// if (tEncodeCStr(&encoder, pReq->name) < 0) return -1;
|
||||
|
||||
if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
|
||||
// if (tEncodeI8(&encoder, pReq->igNotExists) < 0) return -1;
|
||||
|
||||
tEndEncode(&encoder);
|
||||
int32_t tlen = encoder.pos;
|
||||
tEncoderClear(&encoder);
|
||||
return tlen;
|
||||
}
|
||||
int32_t tDeserializeSMDropFullTextReq(void *buf, int32_t bufLen, SMDropFullTextReq *pReq) {
|
||||
SDecoder decoder = {0};
|
||||
tDecoderInit(&decoder, buf, bufLen);
|
||||
if (tStartDecode(&decoder) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
|
||||
if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
|
||||
// tEndEncode(&encoder);
|
||||
// int32_t tlen = encoder.pos;
|
||||
// tEncoderClear(&encoder);
|
||||
// return tlen;
|
||||
// }
|
||||
// int32_t tDeserializeSMDropFullTextReq(void *buf, int32_t bufLen, SMDropFullTextReq *pReq) {
|
||||
// SDecoder decoder = {0};
|
||||
// tDecoderInit(&decoder, buf, bufLen);
|
||||
// if (tStartDecode(&decoder) < 0) return -1;
|
||||
// if (tDecodeCStrTo(&decoder, pReq->name) < 0) return -1;
|
||||
// if (tDecodeI8(&decoder, &pReq->igNotExists) < 0) return -1;
|
||||
|
||||
tEndDecode(&decoder);
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
// tEndDecode(&decoder);
|
||||
// tDecoderClear(&decoder);
|
||||
// return 0;
|
||||
// }
|
||||
|
||||
int32_t tSerializeSNotifyReq(void *buf, int32_t bufLen, SNotifyReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
|
@ -1474,44 +1475,44 @@ void tFreeSStatisReq(SStatisReq *pReq) {
|
|||
taosMemoryFreeClear(pReq->pCont);
|
||||
}
|
||||
|
||||
int32_t tSerializeSCreateAcctReq(void *buf, int32_t bufLen, SCreateAcctReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
// int32_t tSerializeSCreateAcctReq(void *buf, int32_t bufLen, SCreateAcctReq *pReq) {
|
||||
// SEncoder encoder = {0};
|
||||
// tEncoderInit(&encoder, buf, bufLen);
|
||||
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->user) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->pass) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->maxUsers) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->maxDbs) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->maxTimeSeries) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->maxStreams) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->accessState) < 0) return -1;
|
||||
if (tEncodeI64(&encoder, pReq->maxStorage) < 0) return -1;
|
||||
tEndEncode(&encoder);
|
||||
// if (tStartEncode(&encoder) < 0) return -1;
|
||||
// if (tEncodeCStr(&encoder, pReq->user) < 0) return -1;
|
||||
// if (tEncodeCStr(&encoder, pReq->pass) < 0) return -1;
|
||||
// if (tEncodeI32(&encoder, pReq->maxUsers) < 0) return -1;
|
||||
// if (tEncodeI32(&encoder, pReq->maxDbs) < 0) return -1;
|
||||
// if (tEncodeI32(&encoder, pReq->maxTimeSeries) < 0) return -1;
|
||||
// if (tEncodeI32(&encoder, pReq->maxStreams) < 0) return -1;
|
||||
// if (tEncodeI32(&encoder, pReq->accessState) < 0) return -1;
|
||||
// if (tEncodeI64(&encoder, pReq->maxStorage) < 0) return -1;
|
||||
// tEndEncode(&encoder);
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
tEncoderClear(&encoder);
|
||||
return tlen;
|
||||
}
|
||||
// int32_t tlen = encoder.pos;
|
||||
// tEncoderClear(&encoder);
|
||||
// return tlen;
|
||||
// }
|
||||
|
||||
int32_t tDeserializeSCreateAcctReq(void *buf, int32_t bufLen, SCreateAcctReq *pReq) {
|
||||
SDecoder decoder = {0};
|
||||
tDecoderInit(&decoder, buf, bufLen);
|
||||
// int32_t tDeserializeSCreateAcctReq(void *buf, int32_t bufLen, SCreateAcctReq *pReq) {
|
||||
// SDecoder decoder = {0};
|
||||
// tDecoderInit(&decoder, buf, bufLen);
|
||||
|
||||
if (tStartDecode(&decoder) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->pass) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->maxUsers) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->maxDbs) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->maxTimeSeries) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->maxStreams) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->accessState) < 0) return -1;
|
||||
if (tDecodeI64(&decoder, &pReq->maxStorage) < 0) return -1;
|
||||
tEndDecode(&decoder);
|
||||
// if (tStartDecode(&decoder) < 0) return -1;
|
||||
// if (tDecodeCStrTo(&decoder, pReq->user) < 0) return -1;
|
||||
// if (tDecodeCStrTo(&decoder, pReq->pass) < 0) return -1;
|
||||
// if (tDecodeI32(&decoder, &pReq->maxUsers) < 0) return -1;
|
||||
// if (tDecodeI32(&decoder, &pReq->maxDbs) < 0) return -1;
|
||||
// if (tDecodeI32(&decoder, &pReq->maxTimeSeries) < 0) return -1;
|
||||
// if (tDecodeI32(&decoder, &pReq->maxStreams) < 0) return -1;
|
||||
// if (tDecodeI32(&decoder, &pReq->accessState) < 0) return -1;
|
||||
// if (tDecodeI64(&decoder, &pReq->maxStorage) < 0) return -1;
|
||||
// tEndDecode(&decoder);
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
// tDecoderClear(&decoder);
|
||||
// return 0;
|
||||
// }
|
||||
|
||||
int32_t tSerializeSDropUserReq(void *buf, int32_t bufLen, SDropUserReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
|
@@ -5238,11 +5239,11 @@ int32_t tDeserializeSQueryCompactProgressRsp(void *buf, int32_t bufLen, SQueryCo

   if (tStartDecode(&decoder) < 0) return -1;

-  if (tDecodeI32(&decoder, &pReq->compactId) < 0) return -1;
-  if (tDecodeI32(&decoder, &pReq->vgId) < 0) return -1;
-  if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1;
-  if (tDecodeI32(&decoder, &pReq->numberFileset) < 0) return -1;
-  if (tDecodeI32(&decoder, &pReq->finished) < 0) return -1;
+  if (tDecodeI32(&decoder, &pReq->compactId) < 0) return -2;
+  if (tDecodeI32(&decoder, &pReq->vgId) < 0) return -3;
+  if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -4;
+  if (tDecodeI32(&decoder, &pReq->numberFileset) < 0) return -5;
+  if (tDecodeI32(&decoder, &pReq->finished) < 0) return -6;

   tEndDecode(&decoder);
   tDecoderClear(&decoder);
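The decode path above now returns a distinct negative value per field instead of a uniform -1. A minimal sketch of how a caller could exploit that for logging; the helper name below is hypothetical and not part of this patch:

```c
// Hypothetical helper, not in the patch: translate the per-field return codes
// above into a field name so a log line can say exactly what failed to decode.
static const char *compactProgressDecodeErrField(int32_t ret) {
  switch (ret) {
    case -2: return "compactId";
    case -3: return "vgId";
    case -4: return "dnodeId";
    case -5: return "numberFileset";
    case -6: return "finished";
    default: return "header";
  }
}
```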
@ -7191,6 +7192,7 @@ int32_t tSerializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
|
|||
if (tEncodeU32(&encoder, pReq->sqlLen) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
|
||||
if (tEncodeBinary(&encoder, pReq->msg, pReq->phyLen) < 0) return -1;
|
||||
if (tEncodeI8(&encoder, pReq->source) < 0) return -1;
|
||||
tEndEncode(&encoder);
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
|
@ -7227,6 +7229,9 @@ int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
|
|||
if (tDecodeBinaryAlloc(&decoder, (void **)&pReq->msg, &msgLen) < 0) return -1;
|
||||
pReq->phyLen = msgLen;
|
||||
|
||||
if (!tDecodeIsEnd(&decoder)) {
|
||||
if (tDecodeI8(&decoder, &pReq->source) < 0) return -1;
|
||||
}
|
||||
tEndDecode(&decoder);
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
|
@ -7934,64 +7939,64 @@ static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBl
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
|
||||
if (tStartDecode(pDecoder) < 0) return -1;
|
||||
// static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
|
||||
// if (tStartDecode(pDecoder) < 0) return -1;
|
||||
|
||||
if (tDecodeI32(pDecoder, &pBlock->code) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pBlock->uid) < 0) return -1;
|
||||
pBlock->tblFName = taosMemoryCalloc(TSDB_TABLE_FNAME_LEN, 1);
|
||||
if (NULL == pBlock->tblFName) return -1;
|
||||
if (tDecodeCStrTo(pDecoder, pBlock->tblFName) < 0) return -1;
|
||||
if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1;
|
||||
if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
|
||||
if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
|
||||
// if (tDecodeI32(pDecoder, &pBlock->code) < 0) return -1;
|
||||
// if (tDecodeI64(pDecoder, &pBlock->uid) < 0) return -1;
|
||||
// pBlock->tblFName = taosMemoryCalloc(TSDB_TABLE_FNAME_LEN, 1);
|
||||
// if (NULL == pBlock->tblFName) return -1;
|
||||
// if (tDecodeCStrTo(pDecoder, pBlock->tblFName) < 0) return -1;
|
||||
// if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1;
|
||||
// if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
|
||||
// if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
|
||||
|
||||
int32_t meta = 0;
|
||||
if (tDecodeI32(pDecoder, &meta) < 0) return -1;
|
||||
if (meta) {
|
||||
pBlock->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
|
||||
if (NULL == pBlock->pMeta) return -1;
|
||||
if (tDecodeSTableMetaRsp(pDecoder, pBlock->pMeta) < 0) return -1;
|
||||
} else {
|
||||
pBlock->pMeta = NULL;
|
||||
}
|
||||
// int32_t meta = 0;
|
||||
// if (tDecodeI32(pDecoder, &meta) < 0) return -1;
|
||||
// if (meta) {
|
||||
// pBlock->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
|
||||
// if (NULL == pBlock->pMeta) return -1;
|
||||
// if (tDecodeSTableMetaRsp(pDecoder, pBlock->pMeta) < 0) return -1;
|
||||
// } else {
|
||||
// pBlock->pMeta = NULL;
|
||||
// }
|
||||
|
||||
tEndDecode(pDecoder);
|
||||
return 0;
|
||||
}
|
||||
// tEndDecode(pDecoder);
|
||||
// return 0;
|
||||
// }
|
||||
|
||||
int32_t tEncodeSSubmitRsp(SEncoder *pEncoder, const SSubmitRsp *pRsp) {
|
||||
int32_t nBlocks = taosArrayGetSize(pRsp->pArray);
|
||||
// int32_t tEncodeSSubmitRsp(SEncoder *pEncoder, const SSubmitRsp *pRsp) {
|
||||
// int32_t nBlocks = taosArrayGetSize(pRsp->pArray);
|
||||
|
||||
if (tStartEncode(pEncoder) < 0) return -1;
|
||||
// if (tStartEncode(pEncoder) < 0) return -1;
|
||||
|
||||
if (tEncodeI32v(pEncoder, pRsp->numOfRows) < 0) return -1;
|
||||
if (tEncodeI32v(pEncoder, pRsp->affectedRows) < 0) return -1;
|
||||
if (tEncodeI32v(pEncoder, nBlocks) < 0) return -1;
|
||||
for (int32_t iBlock = 0; iBlock < nBlocks; iBlock++) {
|
||||
if (tEncodeSSubmitBlkRsp(pEncoder, (SSubmitBlkRsp *)taosArrayGet(pRsp->pArray, iBlock)) < 0) return -1;
|
||||
}
|
||||
// if (tEncodeI32v(pEncoder, pRsp->numOfRows) < 0) return -1;
|
||||
// if (tEncodeI32v(pEncoder, pRsp->affectedRows) < 0) return -1;
|
||||
// if (tEncodeI32v(pEncoder, nBlocks) < 0) return -1;
|
||||
// for (int32_t iBlock = 0; iBlock < nBlocks; iBlock++) {
|
||||
// if (tEncodeSSubmitBlkRsp(pEncoder, (SSubmitBlkRsp *)taosArrayGet(pRsp->pArray, iBlock)) < 0) return -1;
|
||||
// }
|
||||
|
||||
tEndEncode(pEncoder);
|
||||
return 0;
|
||||
}
|
||||
// tEndEncode(pEncoder);
|
||||
// return 0;
|
||||
// }
|
||||
|
||||
int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
|
||||
if (tStartDecode(pDecoder) < 0) return -1;
|
||||
// int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
|
||||
// if (tStartDecode(pDecoder) < 0) return -1;
|
||||
|
||||
if (tDecodeI32v(pDecoder, &pRsp->numOfRows) < 0) return -1;
|
||||
if (tDecodeI32v(pDecoder, &pRsp->affectedRows) < 0) return -1;
|
||||
if (tDecodeI32v(pDecoder, &pRsp->nBlocks) < 0) return -1;
|
||||
pRsp->pBlocks = taosMemoryCalloc(pRsp->nBlocks, sizeof(*pRsp->pBlocks));
|
||||
if (pRsp->pBlocks == NULL) return -1;
|
||||
for (int32_t iBlock = 0; iBlock < pRsp->nBlocks; iBlock++) {
|
||||
if (tDecodeSSubmitBlkRsp(pDecoder, pRsp->pBlocks + iBlock) < 0) return -1;
|
||||
}
|
||||
// if (tDecodeI32v(pDecoder, &pRsp->numOfRows) < 0) return -1;
|
||||
// if (tDecodeI32v(pDecoder, &pRsp->affectedRows) < 0) return -1;
|
||||
// if (tDecodeI32v(pDecoder, &pRsp->nBlocks) < 0) return -1;
|
||||
// pRsp->pBlocks = taosMemoryCalloc(pRsp->nBlocks, sizeof(*pRsp->pBlocks));
|
||||
// if (pRsp->pBlocks == NULL) return -1;
|
||||
// for (int32_t iBlock = 0; iBlock < pRsp->nBlocks; iBlock++) {
|
||||
// if (tDecodeSSubmitBlkRsp(pDecoder, pRsp->pBlocks + iBlock) < 0) return -1;
|
||||
// }
|
||||
|
||||
tEndDecode(pDecoder);
|
||||
tDecoderClear(pDecoder);
|
||||
return 0;
|
||||
}
|
||||
// tEndDecode(pDecoder);
|
||||
// tDecoderClear(pDecoder);
|
||||
// return 0;
|
||||
// }
|
||||
|
||||
// void tFreeSSubmitBlkRsp(void *param) {
|
||||
// if (NULL == param) {
|
||||
|
@ -8426,6 +8431,7 @@ int32_t tEncodeDeleteRes(SEncoder *pCoder, const SDeleteRes *pRes) {
|
|||
if (tEncodeCStr(pCoder, pRes->tableFName) < 0) return -1;
|
||||
if (tEncodeCStr(pCoder, pRes->tsColName) < 0) return -1;
|
||||
if (tEncodeI64(pCoder, pRes->ctimeMs) < 0) return -1;
|
||||
if (tEncodeI8(pCoder, pRes->source) < 0) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -8450,6 +8456,9 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
|
|||
if (!tDecodeIsEnd(pCoder)) {
|
||||
if (tDecodeI64(pCoder, &pRes->ctimeMs) < 0) return -1;
|
||||
}
|
||||
if (!tDecodeIsEnd(pCoder)) {
|
||||
if (tDecodeI8(pCoder, &pRes->source) < 0) return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -68,7 +68,7 @@ static struct {
   int64_t startTime;
 } global = {0};

-static void dmSetDebugFlag(int32_t signum, void *sigInfo, void *context) { taosSetAllDebugFlag(143); }
+static void dmSetDebugFlag(int32_t signum, void *sigInfo, void *context) { taosSetGlobalDebugFlag(143); }
 static void dmSetAssert(int32_t signum, void *sigInfo, void *context) { tsAssert = 1; }

 static void dmStopDnode(int signum, void *sigInfo, void *context) {
@@ -194,26 +194,26 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal)
   while (pVnode->refCount > 0) taosMsleep(10);

   dInfo("vgId:%d, wait for vnode write queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
-        pVnode->pWriteW.queue->threadId);
+        taosQueueGetThreadId(pVnode->pWriteW.queue));
   tMultiWorkerCleanup(&pVnode->pWriteW);

   dInfo("vgId:%d, wait for vnode sync queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
-        pVnode->pSyncW.queue->threadId);
+        taosQueueGetThreadId(pVnode->pSyncW.queue));
   tMultiWorkerCleanup(&pVnode->pSyncW);

   dInfo("vgId:%d, wait for vnode sync rd queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncRdW.queue,
-        pVnode->pSyncRdW.queue->threadId);
+        taosQueueGetThreadId(pVnode->pSyncRdW.queue));
   tMultiWorkerCleanup(&pVnode->pSyncRdW);

   dInfo("vgId:%d, wait for vnode apply queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
-        pVnode->pApplyW.queue->threadId);
+        taosQueueGetThreadId(pVnode->pApplyW.queue));
   tMultiWorkerCleanup(&pVnode->pApplyW);

   dInfo("vgId:%d, wait for vnode query queue:%p is empty", pVnode->vgId, pVnode->pQueryQ);
   while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10);

   dInfo("vgId:%d, wait for vnode fetch queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
-        pVnode->pFetchQ->threadId);
+        taosQueueGetThreadId(pVnode->pFetchQ));
   while (!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10);

   tqNotifyClose(pVnode->pImpl->pTq);
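The hunk above stops dereferencing `queue->threadId` directly and reads it through `taosQueueGetThreadId()`, which keeps the queue struct opaque to the vnode-mgmt layer. The sketch below only shows the shape such an accessor usually has; the struct layout and the -1 fallback are assumptions, not the actual tqueue implementation.

```c
// Sketch only: an accessor of this shape keeps STaosQueue internals private to
// the queue module. The field layout and null handling here are illustrative.
#include <stddef.h>
#include <stdint.h>

typedef struct STaosQueue {
  int64_t threadId;  // id of the worker thread currently bound to this queue
  // ... remaining members stay private to the queue module
} STaosQueue;

int64_t taosQueueGetThreadId(STaosQueue *pQueue) {
  return (pQueue != NULL) ? pQueue->threadId : -1;
}
```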
@ -365,16 +365,16 @@ int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
|||
}
|
||||
|
||||
dInfo("vgId:%d, write-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
|
||||
pVnode->pWriteW.queue->threadId);
|
||||
taosQueueGetThreadId(pVnode->pWriteW.queue));
|
||||
dInfo("vgId:%d, sync-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
|
||||
pVnode->pSyncW.queue->threadId);
|
||||
taosQueueGetThreadId(pVnode->pSyncW.queue));
|
||||
dInfo("vgId:%d, sync-rd-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncRdW.queue,
|
||||
pVnode->pSyncRdW.queue->threadId);
|
||||
taosQueueGetThreadId(pVnode->pSyncRdW.queue));
|
||||
dInfo("vgId:%d, apply-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
|
||||
pVnode->pApplyW.queue->threadId);
|
||||
taosQueueGetThreadId(pVnode->pApplyW.queue));
|
||||
dInfo("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
|
||||
dInfo("vgId:%d, fetch-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
|
||||
pVnode->pFetchQ->threadId);
|
||||
taosQueueGetThreadId(pVnode->pFetchQ));
|
||||
dInfo("vgId:%d, stream-queue:%p is alloced", pVnode->vgId, pVnode->pStreamQ);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -345,6 +345,7 @@ int32_t dmInitClient(SDnode *pDnode) {
|
|||
rpcInit.parent = pDnode;
|
||||
rpcInit.rfp = rpcRfp;
|
||||
rpcInit.compressSize = tsCompressMsgSize;
|
||||
rpcInit.dfp = destroyAhandle;
|
||||
|
||||
rpcInit.retryMinInterval = tsRedirectPeriod;
|
||||
rpcInit.retryStepFactor = tsRedirectFactor;
|
||||
|
|
|
@@ -111,7 +111,7 @@ STrans *doCreateTrans(SMnode *pMnode, SStreamObj *pStream, SRpcMsg *pReq, const
 int32_t mndPersistTransLog(SStreamObj *pStream, STrans *pTrans, int32_t status);
 SSdbRaw *mndStreamActionEncode(SStreamObj *pStream);
 void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo);
-int32_t mndStreamSetUpdateEpsetAction(SStreamObj *pStream, SVgroupChangeInfo *pInfo, STrans *pTrans);
+int32_t mndStreamSetUpdateEpsetAction(SMnode *pMnode, SStreamObj *pStream, SVgroupChangeInfo *pInfo, STrans *pTrans);

 SStreamObj *mndGetStreamObj(SMnode *pMnode, int64_t streamId);
 int32_t extractNodeEpset(SMnode *pMnode, SEpSet *pEpSet, bool *hasEpset, int32_t taskId, int32_t nodeId);
@@ -363,13 +363,15 @@ static int32_t mndAddKillCompactAction(SMnode *pMnode, STrans *pTrans, SVgObj *p
 }

 static int32_t mndKillCompact(SMnode *pMnode, SRpcMsg *pReq, SCompactObj *pCompact) {
-  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "kill-compact");
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB, pReq, "kill-compact");
   if (pTrans == NULL) {
     mError("compact:%" PRId32 ", failed to drop since %s", pCompact->compactId, terrstr());
     return -1;
   }
   mInfo("trans:%d, used to kill compact:%" PRId32, pTrans->id, pCompact->compactId);

+  mndTransSetDbName(pTrans, pCompact->dbname, NULL);
+
   SSdbRaw *pCommitRaw = mndCompactActionEncode(pCompact);
   if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
     mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
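Switching the transaction to `TRN_CONFLICT_DB` only works if the transaction carries a database name, which is why `mndTransSetDbName()` is added right after creation. A rough sketch of the kind of comparison DB-scoped conflict detection implies; the types and names below are illustrative stand-ins, not the real mndTrans code.

```c
// Illustrative only: with a TRN_CONFLICT_DB-style scope, two concurrently
// running transactions conflict when they target the same database, so the db
// name set via a mndTransSetDbName()-style call is what gets compared.
#include <stdbool.h>
#include <string.h>

typedef struct {
  int  conflictScope;  // stand-in for the conflict policy
  char dbname[64];     // stand-in for the db name carried by the trans
} TransLite;

static bool conflictsOnSameDb(const TransLite *a, const TransLite *b) {
  return a->conflictScope == b->conflictScope && strcmp(a->dbname, b->dbname) == 0;
}
```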
@ -378,7 +380,7 @@ static int32_t mndKillCompact(SMnode *pMnode, SRpcMsg *pReq, SCompactObj *pCompa
|
|||
}
|
||||
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
|
||||
|
||||
void *pIter = NULL;
|
||||
void *pIter = NULL;
|
||||
while (1) {
|
||||
SCompactDetailObj *pDetail = NULL;
|
||||
pIter = sdbFetch(pMnode->pSdb, SDB_COMPACT_DETAIL, pIter, (void **)&pDetail);
|
||||
|
@@ -452,7 +454,7 @@ int32_t mndProcessKillCompactReq(SRpcMsg *pReq){

   code = TSDB_CODE_ACTION_IN_PROGRESS;

-  char obj[MND_COMPACT_ID_LEN] = {0};
+  char obj[TSDB_INT32_ID_LEN] = {0};
   sprintf(obj, "%d", pCompact->compactId);

   auditRecord(pReq, pMnode->clusterId, "killCompact", pCompact->dbname, obj, killCompactReq.sql, killCompactReq.sqlLen);
@ -488,13 +490,17 @@ static int32_t mndUpdateCompactProgress(SMnode *pMnode, SRpcMsg *pReq, int32_t c
|
|||
sdbRelease(pMnode->pSdb, pDetail);
|
||||
}
|
||||
|
||||
return -1;
|
||||
return TSDB_CODE_MND_COMPACT_DETAIL_NOT_EXIST;
|
||||
}
|
||||
|
||||
int32_t mndProcessQueryCompactRsp(SRpcMsg *pReq){
|
||||
SQueryCompactProgressRsp req = {0};
|
||||
if (tDeserializeSQueryCompactProgressRsp(pReq->pCont, pReq->contLen, &req) != 0) {
|
||||
int32_t code = 0;
|
||||
code = tDeserializeSQueryCompactProgressRsp(pReq->pCont, pReq->contLen, &req);
|
||||
if (code != 0) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
mError("failed to deserialize vnode-query-compact-progress-rsp, ret:%d, pCont:%p, len:%d",
|
||||
code, pReq->pCont, pReq->contLen);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -502,10 +508,10 @@ int32_t mndProcessQueryCompactRsp(SRpcMsg *pReq){
|
|||
req.compactId, req.vgId, req.dnodeId, req.numberFileset, req.finished);
|
||||
|
||||
SMnode *pMnode = pReq->info.node;
|
||||
int32_t code = -1;
|
||||
|
||||
|
||||
if(mndUpdateCompactProgress(pMnode, pReq, req.compactId, &req) != 0){
|
||||
code = mndUpdateCompactProgress(pMnode, pReq, req.compactId, &req);
|
||||
if(code != 0){
|
||||
terrno = code;
|
||||
mError("compact:%d, failed to update progress, vgId:%d, dnodeId:%d, numberFileset:%d, finished:%d",
|
||||
req.compactId, req.vgId, req.dnodeId, req.numberFileset, req.finished);
|
||||
return -1;
|
||||
|
@ -612,15 +618,17 @@ static int32_t mndSaveCompactProgress(SMnode *pMnode, int32_t compactId) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, NULL, "update-compact-progress");
|
||||
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB, NULL, "update-compact-progress");
|
||||
if (pTrans == NULL) {
|
||||
mError("trans:%" PRId32 ", failed to create since %s" , pTrans->id, terrstr());
|
||||
return -1;
|
||||
}
|
||||
mInfo("compact:%d, trans:%d, used to update compact progress.", compactId, pTrans->id);
|
||||
|
||||
|
||||
SCompactObj *pCompact = mndAcquireCompact(pMnode, compactId);
|
||||
|
||||
mndTransSetDbName(pTrans, pCompact->dbname, NULL);
|
||||
|
||||
pIter = NULL;
|
||||
while (1) {
|
||||
SCompactDetailObj *pDetail = NULL;
|
||||
|
|
|
@ -610,7 +610,7 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
|
|||
for(int32_t j = 0; j < tagSize; j++){
|
||||
SJson* item = tjsonGetArrayItem(arrayTag, j);
|
||||
|
||||
*(labels + j) = taosMemoryMalloc(MONITOR_TAG_NAME_LEN);
|
||||
*(labels + j) = taosMemoryMalloc(MONITOR_TAG_NAME_LEN);
|
||||
tjsonGetStringValue(item, "name", *(labels + j));
|
||||
|
||||
*(sample_labels + j) = taosMemoryMalloc(MONITOR_TAG_VALUE_LEN);
|
||||
|
@ -626,7 +626,7 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
|
|||
for(int32_t j = 0; j < metricLen; j++){
|
||||
SJson *item = tjsonGetArrayItem(metrics, j);
|
||||
|
||||
char name[MONITOR_METRIC_NAME_LEN] = {0};
|
||||
char name[MONITOR_METRIC_NAME_LEN] = {0};
|
||||
tjsonGetStringValue(item, "name", name);
|
||||
|
||||
double value = 0;
|
||||
|
@ -636,7 +636,7 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
|
|||
tjsonGetDoubleValue(item, "type", &type);
|
||||
|
||||
int32_t metricNameLen = strlen(name) + strlen(tableName) + 2;
|
||||
char* metricName = taosMemoryMalloc(metricNameLen);
|
||||
char* metricName = taosMemoryMalloc(metricNameLen);
|
||||
memset(metricName, 0, metricNameLen);
|
||||
sprintf(metricName, "%s:%s", tableName, name);
|
||||
|
||||
|
@ -669,7 +669,7 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
|
|||
else{
|
||||
mTrace("get metric from registry:%p", metric);
|
||||
}
|
||||
|
||||
|
||||
if(type == 0){
|
||||
taos_counter_add(metric, value, (const char**)sample_labels);
|
||||
}
|
||||
|
@ -689,7 +689,7 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) {
|
|||
taosMemoryFreeClear(labels);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
code = 0;
|
||||
|
@ -1409,24 +1409,6 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
|
|||
if (strcasecmp(cfgReq.config, "resetlog") == 0) {
|
||||
strcpy(dcfgReq.config, "resetlog");
|
||||
#ifdef TD_ENTERPRISE
|
||||
} else if (strncasecmp(cfgReq.config, "supportvnodes", 13) == 0) {
|
||||
int32_t optLen = strlen("supportvnodes");
|
||||
int32_t flag = -1;
|
||||
int32_t code = mndMCfgGetValInt32(&cfgReq, optLen, &flag);
|
||||
if (code < 0) return code;
|
||||
|
||||
if (flag < 0 || flag > 4096) {
|
||||
mError("dnode:%d, failed to config supportVnodes since value:%d. Valid range: [0, 4096]", cfgReq.dnodeId, flag);
|
||||
terrno = TSDB_CODE_OUT_OF_RANGE;
|
||||
goto _err_out;
|
||||
}
|
||||
if (flag == 0) {
|
||||
flag = tsNumOfCores * 2;
|
||||
}
|
||||
flag = TMAX(flag, 2);
|
||||
|
||||
strcpy(dcfgReq.config, "supportvnodes");
|
||||
snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag);
|
||||
} else if (strncasecmp(cfgReq.config, "s3blocksize", 11) == 0) {
|
||||
int32_t optLen = strlen("s3blocksize");
|
||||
int32_t flag = -1;
|
||||
|
|
|
@@ -709,7 +709,8 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {

   int32_t code = syncProcessMsg(pMgmt->sync, pMsg);
   if (code != 0) {
-    mGError("vgId:1, failed to process sync msg:%p type:%s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr());
+    mGError("vgId:1, failed to process sync msg:%p type:%s, errno: %s, code:0x%x", pMsg, TMSG_INFO(pMsg->msgType),
+            terrstr(), code);
   }

   return code;
@ -721,6 +721,8 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
// add into buffer firstly
|
||||
// to make sure when the hb from vnode arrived, the newly created tasks have been in the task map already.
|
||||
taosThreadMutexLock(&execInfo.lock);
|
||||
mDebug("stream stream:%s tasks register into node list", createReq.name);
|
||||
saveStreamTasksInfo(&streamObj, &execInfo);
|
||||
|
@ -1811,7 +1813,7 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange
|
|||
mDebug("stream:0x%" PRIx64 " %s involved node changed, create update trans, transId:%d", pStream->uid,
|
||||
pStream->name, pTrans->id);
|
||||
|
||||
int32_t code = mndStreamSetUpdateEpsetAction(pStream, pChangeInfo, pTrans);
|
||||
int32_t code = mndStreamSetUpdateEpsetAction(pMnode, pStream, pChangeInfo, pTrans);
|
||||
|
||||
// todo: not continue, drop all and retry again
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
|
|
@ -115,6 +115,7 @@ SArray *mndTakeVgroupSnapshot(SMnode *pMnode, bool *allReady) {
|
|||
|
||||
char buf[256] = {0};
|
||||
EPSET_TO_STR(&entry.epset, buf);
|
||||
|
||||
mDebug("take node snapshot, nodeId:%d %s", entry.nodeId, buf);
|
||||
taosArrayPush(pVgroupListSnapshot, &entry);
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
|
@ -300,7 +301,10 @@ static int32_t doSetPauseAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTa
|
|||
return code;
|
||||
}
|
||||
|
||||
mDebug("pause node:%d, epset:%d", pTask->info.nodeId, epset.numOfEps);
|
||||
char buf[256] = {0};
|
||||
EPSET_TO_STR(&epset, buf);
|
||||
mDebug("pause stream task in node:%d, epset:%s", pTask->info.nodeId, buf);
|
||||
|
||||
code = setTransAction(pTrans, pReq, sizeof(SVPauseStreamTaskReq), TDMT_STREAM_TASK_PAUSE, &epset, 0);
|
||||
if (code != 0) {
|
||||
taosMemoryFree(pReq);
|
||||
|
@ -462,14 +466,22 @@ static int32_t doBuildStreamTaskUpdateMsg(void **pBuf, int32_t *pLen, SVgroupCha
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t doSetUpdateTaskAction(STrans *pTrans, SStreamTask *pTask, SVgroupChangeInfo *pInfo) {
|
||||
static int32_t doSetUpdateTaskAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask, SVgroupChangeInfo *pInfo) {
|
||||
void *pBuf = NULL;
|
||||
int32_t len = 0;
|
||||
streamTaskUpdateEpsetInfo(pTask, pInfo->pUpdateNodeList);
|
||||
|
||||
doBuildStreamTaskUpdateMsg(&pBuf, &len, pInfo, pTask->info.nodeId, &pTask->id, pTrans->id);
|
||||
|
||||
int32_t code = setTransAction(pTrans, pBuf, len, TDMT_VND_STREAM_TASK_UPDATE, &pTask->info.epSet, 0);
|
||||
SEpSet epset = {0};
|
||||
bool hasEpset = false;
|
||||
int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId);
|
||||
if (code != TSDB_CODE_SUCCESS || !hasEpset) {
|
||||
terrno = code;
|
||||
return code;
|
||||
}
|
||||
|
||||
code = setTransAction(pTrans, pBuf, len, TDMT_VND_STREAM_TASK_UPDATE, &epset, TSDB_CODE_VND_INVALID_VGROUP_ID);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
taosMemoryFree(pBuf);
|
||||
}
|
||||
|
@ -478,14 +490,14 @@ static int32_t doSetUpdateTaskAction(STrans *pTrans, SStreamTask *pTask, SVgroup
|
|||
}
|
||||
|
||||
// build trans to update the epset
|
||||
int32_t mndStreamSetUpdateEpsetAction(SStreamObj *pStream, SVgroupChangeInfo *pInfo, STrans *pTrans) {
|
||||
int32_t mndStreamSetUpdateEpsetAction(SMnode *pMnode, SStreamObj *pStream, SVgroupChangeInfo *pInfo, STrans *pTrans) {
|
||||
mDebug("stream:0x%" PRIx64 " set tasks epset update action", pStream->uid);
|
||||
taosWLockLatch(&pStream->lock);
|
||||
|
||||
SStreamTaskIter *pIter = createStreamTaskIter(pStream);
|
||||
while (streamTaskIterNextTask(pIter)) {
|
||||
SStreamTask *pTask = streamTaskIterGetCurrent(pIter);
|
||||
int32_t code = doSetUpdateTaskAction(pTrans, pTask, pInfo);
|
||||
int32_t code = doSetUpdateTaskAction(pMnode, pTrans, pTask, pInfo);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
destroyStreamTaskIter(pIter);
|
||||
taosWUnLockLatch(&pStream->lock);
|
||||
|
|
|
@@ -106,7 +106,7 @@ typedef struct SQueryNode SQueryNode;
 #define VND_INFO_FNAME "vnode.json"
 #define VND_INFO_FNAME_TMP "vnode_tmp.json"

-#define VNODE_METRIC_SQL_COUNT "taos_sql_req:count"
+#define VNODE_METRIC_SQL_COUNT "taosd_sql_req:count"

 #define VNODE_METRIC_TAG_NAME_SQL_TYPE "sql_type"
 #define VNODE_METRIC_TAG_NAME_CLUSTER_ID "cluster_id"
|
@ -917,6 +917,22 @@ static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask
|
|||
}
|
||||
}
|
||||
|
||||
int32_t handleStep2Async(SStreamTask* pStreamTask, void* param) {
|
||||
STQ* pTq = param;
|
||||
|
||||
SStreamMeta* pMeta = pStreamTask->pMeta;
|
||||
STaskId hId = pStreamTask->hTaskInfo.id;
|
||||
SStreamTask* pTask = streamMetaAcquireTask(pStreamTask->pMeta, hId.streamId, hId.taskId);
|
||||
if (pTask == NULL) {
|
||||
// todo handle error
|
||||
}
|
||||
|
||||
doStartFillhistoryStep2(pTask, pStreamTask, pTq);
|
||||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// this function should be executed by only one thread, so we set an sentinel to protect this function
|
||||
int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
||||
SStreamScanHistoryReq* pReq = (SStreamScanHistoryReq*)pMsg->pCont;
|
||||
|
@ -1007,37 +1023,27 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
|||
// the following procedure should be executed, no matter status is stop/pause or not
|
||||
tqDebug("s-task:%s scan-history(step 1) ended, elapsed time:%.2fs", id, pTask->execInfo.step1El);
|
||||
|
||||
if (pTask->info.fillHistory) {
|
||||
SStreamTask* pStreamTask = NULL;
|
||||
ASSERT(pTask->info.fillHistory == 1);
|
||||
|
||||
// 1. get the related stream task
|
||||
pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
|
||||
if (pStreamTask == NULL) {
|
||||
tqError("failed to find s-task:0x%" PRIx64 ", it may have been destroyed, drop related fill-history task:%s",
|
||||
pTask->streamTaskId.taskId, pTask->id.idStr);
|
||||
// 1. get the related stream task
|
||||
SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
|
||||
if (pStreamTask == NULL) {
|
||||
tqError("failed to find s-task:0x%" PRIx64 ", it may have been destroyed, drop related fill-history task:%s",
|
||||
pTask->streamTaskId.taskId, pTask->id.idStr);
|
||||
|
||||
tqDebug("s-task:%s fill-history task set status to be dropping and drop it", id);
|
||||
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id, 0);
|
||||
|
||||
atomic_store_32(&pTask->status.inScanHistorySentinel, 0);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ASSERT(pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE);
|
||||
|
||||
code = streamTaskHandleEvent(pStreamTask->status.pSM, TASK_EVENT_HALT);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
doStartFillhistoryStep2(pTask, pStreamTask, pTq);
|
||||
} else {
|
||||
tqError("s-task:%s failed to halt s-task:%s, not launch step2", id, pStreamTask->id.idStr);
|
||||
}
|
||||
|
||||
streamMetaReleaseTask(pMeta, pStreamTask);
|
||||
} else {
|
||||
ASSERT(0);
|
||||
atomic_store_32(&pTask->status.inScanHistorySentinel, 0);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ASSERT(pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE);
|
||||
code = streamTaskHandleEventAsync(pStreamTask->status.pSM, TASK_EVENT_HALT, handleStep2Async, pTq);
|
||||
|
||||
streamMetaReleaseTask(pMeta, pStreamTask);
|
||||
|
||||
atomic_store_32(&pTask->status.inScanHistorySentinel, 0);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return code;
|
||||
|
|
|
@ -368,24 +368,11 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
|
|||
}
|
||||
}
|
||||
|
||||
// todo ignore the error in wal?
|
||||
bool tqNextBlockInWal(STqReader* pReader, const char* id, int sourceExcluded) {
|
||||
SWalReader* pWalReader = pReader->pWalReader;
|
||||
SSDataBlock* pDataBlock = NULL;
|
||||
|
||||
uint64_t st = taosGetTimestampMs();
|
||||
while (1) {
|
||||
// try next message in wal file
|
||||
if (walNextValidMsg(pWalReader) < 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
void* pBody = POINTER_SHIFT(pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
|
||||
int32_t bodyLen = pWalReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg);
|
||||
int64_t ver = pWalReader->pHead->head.version;
|
||||
|
||||
tqReaderSetSubmitMsg(pReader, pBody, bodyLen, ver);
|
||||
pReader->nextBlk = 0;
|
||||
int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData);
|
||||
while (pReader->nextBlk < numOfBlocks) {
|
||||
tqTrace("tq reader next data block %d/%d, len:%d %" PRId64, pReader->nextBlk, numOfBlocks, pReader->msg.msgLen,
|
||||
|
@ -400,33 +387,32 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id, int sourceExcluded) {
|
|||
tqTrace("tq reader return submit block, uid:%" PRId64, pSubmitTbData->uid);
|
||||
SSDataBlock* pRes = NULL;
|
||||
int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL);
|
||||
if (code == TSDB_CODE_SUCCESS && pRes->info.rows > 0) {
|
||||
if (pDataBlock == NULL) {
|
||||
pDataBlock = createOneDataBlock(pRes, true);
|
||||
} else {
|
||||
blockDataMerge(pDataBlock, pRes);
|
||||
}
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
pReader->nextBlk += 1;
|
||||
tqTrace("tq reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid);
|
||||
}
|
||||
}
|
||||
|
||||
tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE);
|
||||
pReader->msg.msgStr = NULL;
|
||||
|
||||
if (pDataBlock != NULL) {
|
||||
blockDataCleanup(pReader->pResBlock);
|
||||
copyDataBlock(pReader->pResBlock, pDataBlock);
|
||||
blockDataDestroy(pDataBlock);
|
||||
return true;
|
||||
} else {
|
||||
qTrace("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id);
|
||||
}
|
||||
|
||||
if (taosGetTimestampMs() - st > 1000) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// try next message in wal file
|
||||
if (walNextValidMsg(pWalReader) < 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
void* pBody = POINTER_SHIFT(pWalReader->pHead->head.body, sizeof(SSubmitReq2Msg));
|
||||
int32_t bodyLen = pWalReader->pHead->head.bodyLen - sizeof(SSubmitReq2Msg);
|
||||
int64_t ver = pWalReader->pHead->head.version;
|
||||
tqReaderSetSubmitMsg(pReader, pBody, bodyLen, ver);
|
||||
pReader->nextBlk = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -28,8 +28,8 @@ static int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDurat
|
|||
|
||||
// extract data blocks(submit/delete) from WAL, and add them into the input queue for all the sources tasks.
|
||||
int32_t tqScanWal(STQ* pTq) {
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
SStreamMeta* pMeta = pTq->pStreamMeta;
|
||||
int32_t vgId = pMeta->vgId;
|
||||
int64_t st = taosGetTimestampMs();
|
||||
|
||||
tqDebug("vgId:%d continue to check if data in wal are available, scanCounter:%d", vgId, pMeta->scanInfo.scanCounter);
|
||||
|
|
|
@@ -263,8 +263,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
     } else if (pHead->msgType == TDMT_VND_CREATE_STB || pHead->msgType == TDMT_VND_ALTER_STB) {
       PROCESS_EXCLUDED_MSG(SVCreateStbReq, tDecodeSVCreateStbReq)
     } else if (pHead->msgType == TDMT_VND_DELETE) {
-      fetchVer++;
-      continue;
+      PROCESS_EXCLUDED_MSG(SDeleteRes, tDecodeDeleteRes)
     }
   }

@ -142,8 +142,10 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
|
|||
if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
|
||||
ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
|
||||
if (ppHTask == NULL || *ppHTask == NULL) {
|
||||
tqError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already",
|
||||
vgId, req.taskId);
|
||||
tqError(
|
||||
"vgId:%d failed to acquire fill-history task:0x%x when handling update, may have been dropped already, rel "
|
||||
"stream task:0x%x",
|
||||
vgId, (uint32_t)pTask->hTaskInfo.id.taskId, req.taskId);
|
||||
CLEAR_RELATED_FILLHISTORY_TASK(pTask);
|
||||
} else {
|
||||
tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr);
|
||||
|
@ -612,23 +614,35 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve
|
|||
|
||||
int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen) {
|
||||
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
|
||||
int32_t vgId = pMeta->vgId;
|
||||
STaskId hTaskId = {0};
|
||||
|
||||
int32_t vgId = pMeta->vgId;
|
||||
tqDebug("vgId:%d receive msg to drop s-task:0x%x", vgId, pReq->taskId);
|
||||
|
||||
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
|
||||
if (pTask != NULL) {
|
||||
// drop the related fill-history task firstly
|
||||
streamMetaWLock(pMeta);
|
||||
|
||||
STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
|
||||
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
|
||||
if ((ppTask != NULL) && ((*ppTask) != NULL)) {
|
||||
streamMetaAcquireOneTask(*ppTask);
|
||||
SStreamTask* pTask = *ppTask;
|
||||
|
||||
if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
|
||||
STaskId* pHTaskId = &pTask->hTaskInfo.id;
|
||||
streamMetaUnregisterTask(pMeta, pHTaskId->streamId, pHTaskId->taskId);
|
||||
tqDebug("s-task:0x%x vgId:%d drop fill-history task:0x%x firstly", pReq->taskId, vgId,
|
||||
(int32_t)pHTaskId->taskId);
|
||||
hTaskId.streamId = pTask->hTaskInfo.id.streamId;
|
||||
hTaskId.taskId = pTask->hTaskInfo.id.taskId;
|
||||
}
|
||||
|
||||
streamTaskClearHTaskAttr(pTask, pReq->resetRelHalt);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
}
|
||||
|
||||
streamTaskClearHTaskAttr(pTask, pReq->resetRelHalt, true);
|
||||
streamMetaWUnLock(pMeta);
|
||||
|
||||
// drop the related fill-history task firstly
|
||||
if (hTaskId.taskId != 0 && hTaskId.streamId != 0) {
|
||||
streamMetaUnregisterTask(pMeta, hTaskId.streamId, hTaskId.taskId);
|
||||
tqDebug("s-task:0x%x vgId:%d drop rel fill-history task:0x%x firstly", pReq->taskId, vgId, (int32_t)hTaskId.taskId);
|
||||
}
|
||||
|
||||
// drop the stream task now
|
||||
streamMetaUnregisterTask(pMeta, pReq->streamId, pReq->taskId);
|
||||
|
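The rewritten drop handler above takes the stream-meta write lock once, only records the related fill-history task id while holding it, and defers both unregister calls until after the lock is released. A self-contained toy of that capture-under-lock, act-after-unlock pattern; the types and names below are simplified stand-ins, not the stream meta API.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t streamId; int32_t taskId; } TaskId;

typedef struct {
  pthread_rwlock_t lock;
  TaskId           relFillHistory;  // zeroed when no related task exists
} MetaLite;

// Copy what we need while holding the lock; do the slow work afterwards.
static void dropTask(MetaLite *pMeta, TaskId id) {
  TaskId hId = {0};

  pthread_rwlock_wrlock(&pMeta->lock);
  hId = pMeta->relFillHistory;          // capture only, no heavy work here
  pMeta->relFillHistory = (TaskId){0};
  pthread_rwlock_unlock(&pMeta->lock);

  if (hId.streamId != 0 && hId.taskId != 0) {
    printf("unregister fill-history task 0x%x first\n", hId.taskId);
  }
  printf("unregister stream task 0x%x\n", id.taskId);
}

int main(void) {
  MetaLite meta = {.relFillHistory = {.streamId = 1, .taskId = 0x2001}};
  pthread_rwlock_init(&meta.lock, NULL);
  dropTask(&meta, (TaskId){.streamId = 1, .taskId = 0x1001});
  pthread_rwlock_destroy(&meta.lock);
  return 0;
}
```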
@ -865,7 +879,7 @@ int32_t tqStreamTaskProcessTaskPauseReq(SStreamMeta* pMeta, char* pMsg){
|
|||
pHistoryTask = streamMetaAcquireTask(pMeta, pTask->hTaskInfo.id.streamId, pTask->hTaskInfo.id.taskId);
|
||||
if (pHistoryTask == NULL) {
|
||||
tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%" PRIx64
|
||||
", it may have been dropped already",
|
||||
", it may have been dropped already",
|
||||
pMeta->vgId, pTask->hTaskInfo.id.taskId);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
|
||||
|
|
|
@ -789,25 +789,6 @@ int32_t tsdbCacheDropSTableColumn(STsdb *pTsdb, SArray *uids, int16_t cid, int8_
|
|||
return code;
|
||||
}
|
||||
|
||||
static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t ltype) {
|
||||
SLastCol *pLastCol = NULL;
|
||||
|
||||
char *err = NULL;
|
||||
size_t vlen = 0;
|
||||
SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
|
||||
size_t klen = ROCKS_KEY_LEN;
|
||||
char *value = NULL;
|
||||
value = rocksdb_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, (char *)key, klen, &vlen, &err);
|
||||
if (NULL != err) {
|
||||
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
|
||||
rocksdb_free(err);
|
||||
}
|
||||
|
||||
pLastCol = tsdbCacheDeserialize(value);
|
||||
|
||||
return pLastCol;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
int idx;
|
||||
SLastKey key;
|
||||
|
@ -1052,6 +1033,25 @@ static int32_t mergeLastCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SC
|
|||
static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCacheRowsReader *pr, int16_t *aCols,
|
||||
int nCols, int16_t *slotIds);
|
||||
#ifdef BUILD_NO_CALL
|
||||
static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t ltype) {
|
||||
SLastCol *pLastCol = NULL;
|
||||
|
||||
char *err = NULL;
|
||||
size_t vlen = 0;
|
||||
SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
|
||||
size_t klen = ROCKS_KEY_LEN;
|
||||
char *value = NULL;
|
||||
value = rocksdb_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, (char *)key, klen, &vlen, &err);
|
||||
if (NULL != err) {
|
||||
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
|
||||
rocksdb_free(err);
|
||||
}
|
||||
|
||||
pLastCol = tsdbCacheDeserialize(value);
|
||||
|
||||
return pLastCol;
|
||||
}
|
||||
|
||||
int32_t tsdbCacheGetSlow(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype) {
|
||||
rocksdb_writebatch_t *wb = NULL;
|
||||
int32_t code = 0;
|
||||
|
@ -1233,10 +1233,10 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
|
|||
int16_t *lastSlotIds = taosMemoryMalloc(num_keys * sizeof(int16_t));
|
||||
int16_t *lastrowColIds = taosMemoryMalloc(num_keys * sizeof(int16_t));
|
||||
int16_t *lastrowSlotIds = taosMemoryMalloc(num_keys * sizeof(int16_t));
|
||||
SArray* lastTmpColArray = NULL;
|
||||
SArray* lastTmpIndexArray = NULL;
|
||||
SArray* lastrowTmpColArray = NULL;
|
||||
SArray* lastrowTmpIndexArray = NULL;
|
||||
SArray *lastTmpColArray = NULL;
|
||||
SArray *lastTmpIndexArray = NULL;
|
||||
SArray *lastrowTmpColArray = NULL;
|
||||
SArray *lastrowTmpIndexArray = NULL;
|
||||
|
||||
int lastIndex = 0;
|
||||
int lastrowIndex = 0;
|
||||
|
@ -1245,7 +1245,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
|
|||
SIdxKey *idxKey = taosArrayGet(remainCols, i);
|
||||
slotIds[i] = pr->pSlotIds[idxKey->idx];
|
||||
if (idxKey->key.ltype == CACHESCAN_RETRIEVE_LAST >> 3) {
|
||||
if(NULL == lastTmpIndexArray) {
|
||||
if (NULL == lastTmpIndexArray) {
|
||||
lastTmpIndexArray = taosArrayInit(num_keys, sizeof(int32_t));
|
||||
}
|
||||
taosArrayPush(lastTmpIndexArray, &(i));
|
||||
|
@ -1253,7 +1253,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
|
|||
lastSlotIds[lastIndex] = pr->pSlotIds[idxKey->idx];
|
||||
lastIndex++;
|
||||
} else {
|
||||
if(NULL == lastrowTmpIndexArray) {
|
||||
if (NULL == lastrowTmpIndexArray) {
|
||||
lastrowTmpIndexArray = taosArrayInit(num_keys, sizeof(int32_t));
|
||||
}
|
||||
taosArrayPush(lastrowTmpIndexArray, &(i));
|
||||
|
@ -1265,17 +1265,18 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
|
|||
|
||||
pTmpColArray = taosArrayInit(lastIndex + lastrowIndex, sizeof(SLastCol));
|
||||
|
||||
if(lastTmpIndexArray != NULL) {
|
||||
if (lastTmpIndexArray != NULL) {
|
||||
mergeLastCid(uid, pTsdb, &lastTmpColArray, pr, lastColIds, lastIndex, lastSlotIds);
|
||||
for(int i = 0; i < taosArrayGetSize(lastTmpColArray); i++) {
|
||||
taosArrayInsert(pTmpColArray, *(int32_t*)taosArrayGet(lastTmpIndexArray, i), taosArrayGet(lastTmpColArray, i));
|
||||
for (int i = 0; i < taosArrayGetSize(lastTmpColArray); i++) {
|
||||
taosArrayInsert(pTmpColArray, *(int32_t *)taosArrayGet(lastTmpIndexArray, i), taosArrayGet(lastTmpColArray, i));
|
||||
}
|
||||
}
|
||||
|
||||
if(lastrowTmpIndexArray != NULL) {
|
||||
if (lastrowTmpIndexArray != NULL) {
|
||||
mergeLastRowCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds);
|
||||
for(int i = 0; i < taosArrayGetSize(lastrowTmpColArray); i++) {
|
||||
taosArrayInsert(pTmpColArray, *(int32_t*)taosArrayGet(lastrowTmpIndexArray, i), taosArrayGet(lastrowTmpColArray, i));
|
||||
for (int i = 0; i < taosArrayGetSize(lastrowTmpColArray); i++) {
|
||||
taosArrayInsert(pTmpColArray, *(int32_t *)taosArrayGet(lastrowTmpIndexArray, i),
|
||||
taosArrayGet(lastrowTmpColArray, i));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -128,12 +128,12 @@ void initTqAPI(SStoreTqReader* pTq) {
|
|||
pTq->tqReaderCurrentBlockConsumed = tqCurrentBlockConsumed;
|
||||
|
||||
pTq->tqReaderGetWalReader = tqGetWalReader; // todo remove it
|
||||
pTq->tqReaderRetrieveTaosXBlock = tqRetrieveTaosxBlock; // todo remove it
|
||||
// pTq->tqReaderRetrieveTaosXBlock = tqRetrieveTaosxBlock; // todo remove it
|
||||
|
||||
pTq->tqReaderSetSubmitMsg = tqReaderSetSubmitMsg; // todo remove it
|
||||
pTq->tqGetResultBlock = tqGetResultBlock;
|
||||
|
||||
pTq->tqReaderNextBlockFilterOut = tqNextDataBlockFilterOut;
|
||||
// pTq->tqReaderNextBlockFilterOut = tqNextDataBlockFilterOut;
|
||||
pTq->tqGetResultBlockTime = tqGetResultBlockTime;
|
||||
|
||||
pTq->tqGetStreamExecProgress = tqGetStreamExecInfo;
|
||||
|
|
|
@@ -372,8 +372,8 @@ int32_t vnodeProcessSyncMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {

   int32_t code = syncProcessMsg(pVnode->sync, pMsg);
   if (code != 0) {
-    vGError("vgId:%d, failed to process sync msg:%p type:%s since %s", pVnode->config.vgId, pMsg,
-            TMSG_INFO(pMsg->msgType), terrstr());
+    vGError("vgId:%d, failed to process sync msg:%p type:%s, errno: %s, code:0x%x", pVnode->config.vgId, pMsg,
+            TMSG_INFO(pMsg->msgType), terrstr(), code);
   }

   return code;
@@ -94,10 +94,10 @@ int32_t doCountWindowAggImpl(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
   int32_t code = TSDB_CODE_SUCCESS;

   for (int32_t i = 0; i < pBlock->info.rows;) {
-    int32_t step = pInfo->windowSliding;
     SCountWindowResult* pBuffInfo = setCountWindowOutputBuff(pExprSup, &pInfo->countSup, &pInfo->pRow);
     int32_t prevRows = pBuffInfo->winRows;
     int32_t num = updateCountWindowInfo(i, pBlock->info.rows, pInfo->windowCount, &pBuffInfo->winRows);
+    int32_t step = num;
     if (prevRows == 0) {
       pInfo->pRow->win.skey = tsCols[i];
     }
@ -118,6 +118,8 @@ int32_t doCountWindowAggImpl(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
|
|||
if (prevRows <= pInfo->windowSliding) {
|
||||
if (pBuffInfo->winRows > pInfo->windowSliding) {
|
||||
step = pInfo->windowSliding - prevRows;
|
||||
} else {
|
||||
step = pInfo->windowSliding;
|
||||
}
|
||||
} else {
|
||||
step = 0;
|
||||
|
|
|
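The two hunks above rework how far the row cursor advances (`step`) for COUNT_WINDOW with a sliding value: when the slide is smaller than the window count, consecutive windows overlap, so the cursor must move by the slide rather than by the rows just buffered. A self-contained toy of the row ranges involved; the window semantics are simplified, and whether a trailing partial window is emitted is left to the operator.

```c
#include <stdio.h>

int main(void) {
  const int totalRows = 10, windowCount = 4, windowSliding = 2;

  // Each window holds up to windowCount rows and starts windowSliding rows
  // after the previous one, so rows are shared by overlapping windows and the
  // cursor advances by the slide rather than by the rows just consumed.
  for (int start = 0; start < totalRows; start += windowSliding) {
    int end = start + windowCount;
    if (end > totalRows) end = totalRows;
    printf("count window rows [%d, %d) -> %d rows\n", start, end, end - start);
  }
  return 0;
}
```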
@ -456,7 +456,8 @@ static int32_t buildSeqStbJoinOperatorParam(SDynQueryCtrlOperatorInfo* pInfo, SS
|
|||
code = pInfo->stbJoin.basic.srcScan[1] ? buildSingleTableScanOperatorParam(&pSrcParam1, 1, rightVg, rightUid) : buildExchangeOperatorParam(&pSrcParam1, 1, rightVg, rightUid);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool initParam = pSrcParam0 ? true : false;
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = buildGroupCacheOperatorParam(&pGcParam0, 0, *leftVg, *leftUid, pPost->leftNeedCache, pSrcParam0);
|
||||
pSrcParam0 = NULL;
|
||||
|
@ -466,7 +467,7 @@ static int32_t buildSeqStbJoinOperatorParam(SDynQueryCtrlOperatorInfo* pInfo, SS
|
|||
pSrcParam1 = NULL;
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = buildMergeJoinOperatorParam(ppParam, pSrcParam0 ? true : false, pGcParam0, pGcParam1);
|
||||
code = buildMergeJoinOperatorParam(ppParam, initParam, pGcParam0, pGcParam1);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
if (pSrcParam0) {
|
||||
|
|
|
@ -1009,6 +1009,22 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) {
|
|||
pSup->deleteMark = INT64_MAX;
|
||||
pInfo->ignoreExpiredDataSaved = pInfo->ignoreExpiredData;
|
||||
pInfo->ignoreExpiredData = false;
|
||||
} else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT) {
|
||||
SStreamCountAggOperatorInfo* pInfo = pOperator->info;
|
||||
STimeWindowAggSupp* pSup = &pInfo->twAggSup;
|
||||
|
||||
ASSERT(pSup->calTrigger == STREAM_TRIGGER_AT_ONCE || pSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE);
|
||||
ASSERT(pSup->calTriggerSaved == 0 && pSup->deleteMarkSaved == 0);
|
||||
|
||||
qInfo("save stream param for state: %d, %" PRId64, pSup->calTrigger, pSup->deleteMark);
|
||||
|
||||
pSup->calTriggerSaved = pSup->calTrigger;
|
||||
pSup->deleteMarkSaved = pSup->deleteMark;
|
||||
pSup->calTrigger = STREAM_TRIGGER_AT_ONCE;
|
||||
pSup->deleteMark = INT64_MAX;
|
||||
pInfo->ignoreExpiredDataSaved = pInfo->ignoreExpiredData;
|
||||
pInfo->ignoreExpiredData = false;
|
||||
qInfo("save stream task:%s, param for state: %d", GET_TASKID(pTaskInfo), pInfo->ignoreExpiredData);
|
||||
}
|
||||
|
||||
// iterate operator tree
|
||||
|
|
File diff suppressed because it is too large
|
@@ -3763,8 +3763,9 @@ static int32_t stopSubTablesTableMergeScan(STableMergeScanInfo* pInfo) {

     taosMemoryFree(pSubTblsInfo);
     pInfo->pSubTablesMergeInfo = NULL;
+
+    taosMemoryTrim(0);
   }
-  taosMemoryTrim(0);
   return TSDB_CODE_SUCCESS;
 }

@ -390,10 +390,12 @@ void* doStreamCountDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOperato
|
|||
buf = taosDecodeFixedI32(buf, &mapSize);
|
||||
for (int32_t i = 0; i < mapSize; i++) {
|
||||
SSessionKey key = {0};
|
||||
SResultWindowInfo winfo = {0};
|
||||
SCountWindowInfo curWin = {0};
|
||||
buf = decodeSSessionKey(buf, &key);
|
||||
buf = decodeSResultWindowInfo(buf, &winfo, pInfo->streamAggSup.resultRowSize);
|
||||
tSimpleHashPut(pInfo->streamAggSup.pResultRows, &key, sizeof(SSessionKey), &winfo, sizeof(SResultWindowInfo));
|
||||
SBuffInfo buffInfo = {.rebuildWindow = false, .winBuffOp = NONE_WINDOW, .pCur = NULL};
|
||||
setCountOutputBuf(&pInfo->streamAggSup, key.win.skey, key.groupId, &curWin, &buffInfo);
|
||||
buf = decodeSResultWindowInfo(buf, &curWin.winInfo, pInfo->streamAggSup.resultRowSize);
|
||||
tSimpleHashPut(pInfo->streamAggSup.pResultRows, &key, sizeof(SSessionKey), &curWin.winInfo, sizeof(SResultWindowInfo));
|
||||
}
|
||||
|
||||
// 2.twAggSup
|
||||
|
@ -694,6 +696,8 @@ SOperatorInfo* createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhys
|
|||
pInfo->recvGetAll = false;
|
||||
|
||||
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT;
|
||||
setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT, true,
|
||||
OP_NOT_OPENED, pInfo, pTaskInfo);
|
||||
// for stream
|
||||
void* buff = NULL;
|
||||
int32_t len = 0;
|
||||
|
@ -704,8 +708,6 @@ SOperatorInfo* createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhys
|
|||
doStreamCountDecodeOpState(buff, len, pOperator, true);
|
||||
taosMemoryFree(buff);
|
||||
}
|
||||
setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT, true,
|
||||
OP_NOT_OPENED, pInfo, pTaskInfo);
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamCountAgg, NULL, destroyStreamCountAggOperatorInfo,
|
||||
optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
|
||||
setOperatorStreamStateFn(pOperator, streamCountReleaseState, streamCountReloadState);
|
||||
|
|
|
@ -406,6 +406,7 @@ void* doStreamEventDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOperato
|
|||
if (!pInfo) {
|
||||
return buf;
|
||||
}
|
||||
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
|
||||
|
||||
// 4.checksum
|
||||
int32_t dataLen = len - sizeof(uint32_t);
|
||||
|
@ -423,6 +424,8 @@ void* doStreamEventDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOperato
|
|||
SSessionKey key = {0};
|
||||
SResultWindowInfo winfo = {0};
|
||||
buf = decodeSSessionKey(buf, &key);
|
||||
pAggSup->stateStore.streamStateSessionAddIfNotExist(pAggSup->pState, &winfo.sessionWin, pAggSup->gap,
|
||||
(void**)&winfo.pStatePos, &pAggSup->resultRowSize);
|
||||
buf = decodeSResultWindowInfo(buf, &winfo, pInfo->streamAggSup.resultRowSize);
|
||||
tSimpleHashPut(pInfo->streamAggSup.pResultRows, &key, sizeof(SSessionKey), &winfo, sizeof(SResultWindowInfo));
|
||||
}
|
||||
|
@ -735,6 +738,8 @@ SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhys
|
|||
pInfo->reCkBlock = false;
|
||||
pInfo->recvGetAll = false;
|
||||
|
||||
setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED,
|
||||
pInfo, pTaskInfo);
|
||||
// for stream
|
||||
void* buff = NULL;
|
||||
int32_t len = 0;
|
||||
|
@ -746,8 +751,6 @@ SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhys
|
|||
taosMemoryFree(buff);
|
||||
}
|
||||
|
||||
setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED,
|
||||
pInfo, pTaskInfo);
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamEventAgg, NULL, destroyStreamEventOperatorInfo,
|
||||
optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
|
||||
setOperatorStreamStateFn(pOperator, streamEventReleaseState, streamEventReloadState);
|
||||
|
|
File diff suppressed because it is too large
|
@ -2533,7 +2533,6 @@ int32_t encodeSResultWindowInfo(void** buf, SResultWindowInfo* key, int32_t outL
|
|||
|
||||
void* decodeSResultWindowInfo(void* buf, SResultWindowInfo* key, int32_t outLen) {
|
||||
buf = taosDecodeFixedBool(buf, &key->isOutput);
|
||||
key->pStatePos->pRowBuff = NULL;
|
||||
buf = decodeSSessionKey(buf, &key->sessionWin);
|
||||
return buf;
|
||||
}
|
||||
|
@ -2591,6 +2590,7 @@ void* doStreamSessionDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOpera
|
|||
if (!pInfo) {
|
||||
return buf;
|
||||
}
|
||||
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
|
||||
|
||||
// 5.checksum
|
||||
if (isParent) {
|
||||
|
@ -2609,6 +2609,8 @@ void* doStreamSessionDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOpera
|
|||
SSessionKey key = {0};
|
||||
SResultWindowInfo winfo = {0};
|
||||
buf = decodeSSessionKey(buf, &key);
|
||||
pAggSup->stateStore.streamStateSessionAddIfNotExist(pAggSup->pState, &winfo.sessionWin, pAggSup->gap,
|
||||
(void**)&winfo.pStatePos, &pAggSup->resultRowSize);
|
||||
buf = decodeSResultWindowInfo(buf, &winfo, pInfo->streamAggSup.resultRowSize);
|
||||
tSimpleHashPut(pInfo->streamAggSup.pResultRows, &key, sizeof(SSessionKey), &winfo, sizeof(SResultWindowInfo));
|
||||
}
|
||||
|
@ -2992,6 +2994,8 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
|
|||
pInfo->recvGetAll = false;
|
||||
|
||||
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
|
||||
setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, true,
|
||||
OP_NOT_OPENED, pInfo, pTaskInfo);
|
||||
// for stream
|
||||
void* buff = NULL;
|
||||
int32_t len = 0;
|
||||
|
@ -3002,8 +3006,6 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
|
|||
doStreamSessionDecodeOpState(buff, len, pOperator, true);
|
||||
taosMemoryFree(buff);
|
||||
}
|
||||
setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, true,
|
||||
OP_NOT_OPENED, pInfo, pTaskInfo);
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamSessionAgg, NULL, destroyStreamSessionAggOperatorInfo,
|
||||
optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
|
||||
setOperatorStreamStateFn(pOperator, streamSessionReleaseState, streamSessionReloadState);
|
||||
|
@ -3538,6 +3540,7 @@ void* doStreamStateDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOperato
|
|||
if (!pInfo) {
|
||||
return buf;
|
||||
}
|
||||
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
|
||||
|
||||
// 5.checksum
|
||||
if (isParent) {
|
||||
|
@ -3556,6 +3559,9 @@ void* doStreamStateDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOperato
|
|||
SSessionKey key = {0};
|
||||
SResultWindowInfo winfo = {0};
|
||||
buf = decodeSSessionKey(buf, &key);
|
||||
pAggSup->stateStore.streamStateStateAddIfNotExist(pAggSup->pState, &winfo.sessionWin, NULL,
|
||||
pAggSup->stateKeySize, compareStateKey,
|
||||
(void**)&winfo.pStatePos, &pAggSup->resultRowSize);
|
||||
buf = decodeSResultWindowInfo(buf, &winfo, pInfo->streamAggSup.resultRowSize);
|
||||
tSimpleHashPut(pInfo->streamAggSup.pResultRows, &key, sizeof(SSessionKey), &winfo, sizeof(SResultWindowInfo));
|
||||
}
|
||||
|
@ -3873,6 +3879,8 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
|
|||
pInfo->pCheckpointRes = createSpecialDataBlock(STREAM_CHECKPOINT);
|
||||
pInfo->recvGetAll = false;
|
||||
|
||||
setOperatorInfo(pOperator, "StreamStateAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, true, OP_NOT_OPENED,
|
||||
pInfo, pTaskInfo);
|
||||
// for stream
|
||||
void* buff = NULL;
|
||||
int32_t len = 0;
|
||||
|
@ -3884,8 +3892,6 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
|
|||
taosMemoryFree(buff);
|
||||
}
|
||||
|
||||
setOperatorInfo(pOperator, "StreamStateAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, true, OP_NOT_OPENED,
|
||||
pInfo, pTaskInfo);
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamStateAgg, NULL, destroyStreamStateOperatorInfo,
|
||||
optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
|
||||
setOperatorStreamStateFn(pOperator, streamStateReleaseState, streamStateReloadState);
|
||||
|
|
|
@@ -1199,6 +1199,18 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
   return code;
 }

+static void freeSSortSource(SSortSource* source) {
+  if (NULL == source) return;
+  if (source->param && !source->onlyRef) {
+    taosMemoryFree(source->param);
+  }
+  if (!source->onlyRef && source->src.pBlock) {
+    blockDataDestroy(source->src.pBlock);
+    source->src.pBlock = NULL;
+  }
+  taosMemoryFree(source);
+}
+
 static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) {
   int32_t code = 0;
   size_t sortBufSize = pHandle->numOfPages * pHandle->pageSize;
@@ -1231,14 +1243,7 @@ static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) {

     code = blockDataMerge(pHandle->pDataBlock, pBlock);
     if (code != TSDB_CODE_SUCCESS) {
-      if (source->param && !source->onlyRef) {
-        taosMemoryFree(source->param);
-      }
-      if (!source->onlyRef && source->src.pBlock) {
-        blockDataDestroy(source->src.pBlock);
-        source->src.pBlock = NULL;
-      }
-      taosMemoryFree(source);
+      freeSSortSource(source);
       return code;
     }

@ -1248,15 +1253,7 @@ static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) {
|
|||
int64_t p = taosGetTimestampUs();
|
||||
code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
|
||||
if (code != 0) {
|
||||
if (source->param && !source->onlyRef) {
|
||||
taosMemoryFree(source->param);
|
||||
}
|
||||
if (!source->onlyRef && source->src.pBlock) {
|
||||
blockDataDestroy(source->src.pBlock);
|
||||
source->src.pBlock = NULL;
|
||||
}
|
||||
|
||||
taosMemoryFree(source);
|
||||
freeSSortSource(source);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1265,16 +1262,13 @@ static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) {
|
|||
if (pHandle->pqMaxRows > 0) blockDataKeepFirstNRows(pHandle->pDataBlock, pHandle->pqMaxRows);
|
||||
code = doAddToBuf(pHandle->pDataBlock, pHandle);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
freeSSortSource(source);
|
||||
return code;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (source->param && !source->onlyRef) {
|
||||
taosMemoryFree(source->param);
|
||||
}
|
||||
|
||||
taosMemoryFree(source);
|
||||
freeSSortSource(source);
|
||||
|
||||
if (pHandle->pDataBlock != NULL && pHandle->pDataBlock->info.rows > 0) {
|
||||
size_t size = blockDataGetSize(pHandle->pDataBlock);
|
||||
|
|
|
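The hunks above replace the hand-rolled cleanup in every error path of `createBlocksQuickSortInitialSources()` with the new `freeSSortSource()` helper, so the ownership rules (free `param` and destroy the block only when the source owns them, i.e. not `onlyRef`) live in one place. Sketch of the resulting call pattern only; `source`, `pHandle`, and `code` come from the surrounding function and are elided here.

```c
// Error paths now funnel through one helper instead of three copies of the
// same free/destroy sequence.
code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
if (code != TSDB_CODE_SUCCESS) {
  freeSSortSource(source);  // frees param/block only when the source owns them
  return code;
}
```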
@@ -76,8 +76,8 @@ char* idxInt2str(int64_t val, char* dst, int radix) {
  return dst - 1;
}
__compar_fn_t idxGetCompar(int8_t type) {
  if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY ||
      type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_GEOMETRY) {
  if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_VARBINARY || type == TSDB_DATA_TYPE_NCHAR ||
      type == TSDB_DATA_TYPE_GEOMETRY) {
    return (__compar_fn_t)strcmp;
  }
  return getComparFunc(type, 0);

@@ -108,8 +108,8 @@ static FORCE_INLINE TExeCond tCompareEqual(void* a, void* b, int8_t type) {
  return tCompare(func, QUERY_TERM, a, b, type);
}
TExeCond tCompare(__compar_fn_t func, int8_t cmptype, void* a, void* b, int8_t dtype) {
  if (dtype == TSDB_DATA_TYPE_BINARY || dtype == TSDB_DATA_TYPE_NCHAR ||
      dtype == TSDB_DATA_TYPE_VARBINARY || dtype == TSDB_DATA_TYPE_GEOMETRY) {
  if (dtype == TSDB_DATA_TYPE_BINARY || dtype == TSDB_DATA_TYPE_NCHAR || dtype == TSDB_DATA_TYPE_VARBINARY ||
      dtype == TSDB_DATA_TYPE_GEOMETRY) {
    return tDoCompare(func, cmptype, a, b);
  }
#if 1

@@ -290,6 +290,7 @@ int idxUidCompare(const void* a, const void* b) {
  uint64_t r = *(uint64_t*)b;
  return l - r;
}
#ifdef BUILD_NO_CALL
int32_t idxConvertData(void* src, int8_t type, void** dst) {
  int tlen = -1;
  switch (type) {

@@ -372,6 +373,8 @@ int32_t idxConvertData(void* src, int8_t type, void** dst) {
  // indexMayFillNumbericData(*dst, tlen);
  return tlen;
}
#endif

int32_t idxConvertDataToStr(void* src, int8_t type, void** dst) {
  if (src == NULL) {
    *dst = strndup(INDEX_DATA_NULL_STR, (int)strlen(INDEX_DATA_NULL_STR));
@ -216,7 +216,7 @@ int taos_collector_registry_validate_metric_name(taos_collector_registry_t *self
|
|||
regfree(&r);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
const char *taos_collector_registry_bridge(taos_collector_registry_t *self, char *ts, char *format) {
|
||||
taos_metric_formatter_clear(self->metric_formatter);
|
||||
taos_metric_formatter_load_metrics(self->metric_formatter, self->collectors, ts, format);
|
||||
|
@ -229,7 +229,7 @@ const char *taos_collector_registry_bridge(taos_collector_registry_t *self, char
|
|||
|
||||
return taos_string_builder_str(self->string_builder_batch);
|
||||
}
|
||||
|
||||
*/
|
||||
int taos_collector_registry_clear_batch(taos_collector_registry_t *self){
|
||||
return taos_string_builder_clear(self->string_builder_batch);
|
||||
}
|
||||
|
|
|
@ -38,7 +38,7 @@ int taos_gauge_destroy(taos_gauge_t *self) {
|
|||
self = NULL;
|
||||
return r;
|
||||
}
|
||||
|
||||
/*
|
||||
int taos_gauge_inc(taos_gauge_t *self, const char **label_values) {
|
||||
TAOS_ASSERT(self != NULL);
|
||||
if (self == NULL) return 1;
|
||||
|
@ -86,7 +86,7 @@ int taos_gauge_sub(taos_gauge_t *self, double r_value, const char **label_values
|
|||
if (sample == NULL) return 1;
|
||||
return taos_metric_sample_sub(sample, r_value);
|
||||
}
|
||||
|
||||
*/
|
||||
int taos_gauge_set(taos_gauge_t *self, double r_value, const char **label_values) {
|
||||
TAOS_ASSERT(self != NULL);
|
||||
if (self == NULL) return 1;
|
||||
|
|
|
@ -63,7 +63,7 @@ int taos_metric_formatter_destroy(taos_metric_formatter_t *self) {
|
|||
self = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
int taos_metric_formatter_load_help(taos_metric_formatter_t *self, const char *name, const char *help) {
|
||||
TAOS_ASSERT(self != NULL);
|
||||
if (self == NULL) return 1;
|
||||
|
@ -105,7 +105,7 @@ int taos_metric_formatter_load_type(taos_metric_formatter_t *self, const char *n
|
|||
|
||||
return taos_string_builder_add_char(self->string_builder, '\n');
|
||||
}
|
||||
|
||||
*/
|
||||
int taos_metric_formatter_load_l_value(taos_metric_formatter_t *self, const char *name, const char *suffix,
|
||||
size_t label_count, const char **label_keys, const char **label_values) {
|
||||
TAOS_ASSERT(self != NULL);
|
||||
|
@ -156,7 +156,7 @@ int taos_metric_formatter_load_l_value(taos_metric_formatter_t *self, const char
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
int taos_metric_formatter_load_sample(taos_metric_formatter_t *self, taos_metric_sample_t *sample,
|
||||
char *ts, char *format) {
|
||||
TAOS_ASSERT(self != NULL);
|
||||
|
@ -185,7 +185,7 @@ int taos_metric_formatter_load_sample(taos_metric_formatter_t *self, taos_metric
|
|||
|
||||
return taos_string_builder_add_char(self->string_builder, '\n');
|
||||
}
|
||||
|
||||
*/
|
||||
int taos_metric_formatter_clear(taos_metric_formatter_t *self) {
|
||||
TAOS_ASSERT(self != NULL);
|
||||
return taos_string_builder_clear(self->string_builder);
|
||||
|
@ -204,7 +204,7 @@ char *taos_metric_formatter_dump(taos_metric_formatter_t *self) {
|
|||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
/*
|
||||
int taos_metric_formatter_load_metric(taos_metric_formatter_t *self, taos_metric_t *metric, char *ts, char *format) {
|
||||
TAOS_ASSERT(self != NULL);
|
||||
if (self == NULL) return 1;
|
||||
|
@ -255,3 +255,4 @@ int taos_metric_formatter_load_metrics(taos_metric_formatter_t *self, taos_map_t
|
|||
}
|
||||
return r;
|
||||
}
|
||||
*/
|
|
@ -91,6 +91,7 @@ int taos_metric_sample_add(taos_metric_sample_t *self, double r_value) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
int taos_metric_sample_sub(taos_metric_sample_t *self, double r_value) {
|
||||
TAOS_ASSERT(self != NULL);
|
||||
if (self->type != TAOS_GAUGE) {
|
||||
|
@ -99,7 +100,8 @@ int taos_metric_sample_sub(taos_metric_sample_t *self, double r_value) {
|
|||
}
|
||||
|
||||
#ifdef C11_ATOMIC
|
||||
/*_Atomic*/ double old = atomic_load(&self->r_value);
|
||||
///_Atomic/
|
||||
double old = atomic_load(&self->r_value);
|
||||
for (;;) {
|
||||
_Atomic double new = ATOMIC_VAR_INIT(old - r_value);
|
||||
if (atomic_compare_exchange_weak(&self->r_value, &old, new)) {
|
||||
|
@ -116,6 +118,7 @@ int taos_metric_sample_sub(taos_metric_sample_t *self, double r_value) {
|
|||
|
||||
return 0;
|
||||
}
|
||||
*/
|
||||
|
||||
int taos_metric_sample_set(taos_metric_sample_t *self, double r_value) {
|
||||
if (self->type != TAOS_GAUGE && self->type != TAOS_COUNTER) {
|
||||
|
|
|
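The commented-out taos_metric_sample_sub code above updates a shared double with a C11 compare-and-swap retry loop. As a point of reference only, a minimal standalone sketch of that pattern (not part of this commit; the names `value` and `sample_sub` are hypothetical, not TDengine APIs) looks like this:

```c
#include <stdatomic.h>

/* Minimal sketch of the CAS retry loop used by the commented-out gauge code. */
static void sample_sub(_Atomic double *value, double delta) {
  double old = atomic_load(value);
  /* On failure, atomic_compare_exchange_weak refreshes `old` with the
   * current contents of *value, so the loop simply retries. */
  while (!atomic_compare_exchange_weak(value, &old, old - delta)) {
  }
}
```

The weak variant of the exchange may fail spuriously, which is why it is always wrapped in a retry loop.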
@@ -4010,6 +4010,26 @@ static int32_t translateEventWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
}

static int32_t translateCountWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
  SCountWindowNode* pCountWin = (SCountWindowNode*)pSelect->pWindow;
  if (pCountWin->windowCount <= 1) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                   "Size of Count window must exceed 1.");
  }

  if (pCountWin->windowSliding <= 0) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                   "Size of Count window must exceed 0.");
  }

  if (pCountWin->windowSliding > pCountWin->windowCount) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                   "sliding value no larger than the count value.");
  }

  if (pCountWin->windowCount > INT32_MAX) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                   "Size of Count window must less than 2147483647(INT32_MAX).");
  }
  if (QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
      !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TIMELINE_QUERY,
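Taken together, the checks added in translateCountWindow constrain the count window arguments to 1 < count <= INT32_MAX and 0 < sliding <= count. A small standalone sketch that mirrors those rules (illustrative only, not code from this commit; the helper name is hypothetical):

```c
#include <stdbool.h>
#include <stdint.h>

/* Mirrors the numeric checks performed by translateCountWindow. */
static bool countWindowArgsValid(int64_t count, int64_t sliding) {
  if (count <= 1) return false;         /* "Size of Count window must exceed 1." */
  if (sliding <= 0) return false;       /* "Size of Count window must exceed 0." */
  if (sliding > count) return false;    /* sliding must not exceed the count value */
  if (count > INT32_MAX) return false;  /* count must stay below 2147483647 */
  return true;
}

/* e.g. countWindowArgsValid(10, 5)  -> true
 *      countWindowArgsValid(1, 1)   -> false (count too small)
 *      countWindowArgsValid(10, 20) -> false (sliding larger than count) */
```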
@ -4528,7 +4548,7 @@ static int32_t translateWhere(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = getQueryTimeRange(pCxt, pSelect->pWhere, &pSelect->timeRange);
|
||||
}
|
||||
if (pSelect->pWhere != NULL) {
|
||||
if (pSelect->pWhere != NULL && pCxt->pParseCxt->topicQuery == false) {
|
||||
setTableVgroupsFromEqualTbnameCond(pCxt, pSelect);
|
||||
}
|
||||
return code;
|
||||
|
@ -7829,29 +7849,7 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm
|
|||
if (pStmt->pOptions->ignoreExpired != 1) {
|
||||
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
|
||||
"Ignore expired data of Count window must be 1.");
|
||||
}
|
||||
|
||||
SCountWindowNode* pCountWin = (SCountWindowNode*)pSelect->pWindow;
|
||||
if (pCountWin->windowCount <= 1) {
|
||||
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
|
||||
"Size of Count window must exceed 1.");
|
||||
}
|
||||
|
||||
if (pCountWin->windowSliding <= 0) {
|
||||
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
|
||||
"Size of Count window must exceed 0.");
|
||||
}
|
||||
|
||||
if (pCountWin->windowSliding > pCountWin->windowCount) {
|
||||
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
|
||||
"sliding value no larger than the count value.");
|
||||
}
|
||||
|
||||
if (pCountWin->windowCount > INT32_MAX) {
|
||||
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
|
||||
"Size of Count window must less than 2147483647(INT32_MAX).");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -8138,7 +8136,9 @@ static int32_t adjustTagsForCreateTable(STranslateContext* pCxt, SCreateStreamSt
|
|||
SColumnDefNode* pDef = (SColumnDefNode*)pTagDef;
|
||||
if (!dataTypeEqual(&pDef->dataType, &((SExprNode*)pTagExpr)->resType)) {
|
||||
SNode* pFunc = NULL;
|
||||
int32_t code = createCastFunc(pCxt, pTagExpr, pDef->dataType, &pFunc);
|
||||
SDataType defType = pDef->dataType;
|
||||
defType.bytes = calcTypeBytes(defType);
|
||||
int32_t code = createCastFunc(pCxt, pTagExpr, defType, &pFunc);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
return code;
|
||||
}
@ -1016,10 +1016,6 @@ static int32_t createWindowLogicNodeByCount(SLogicPlanContext* pCxt, SCountWindo
|
|||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
if (!pCxt->pPlanCxt->streamQuery && tsDisableCount) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
pWindow->winType = WINDOW_TYPE_COUNT;
|
||||
pWindow->node.groupAction = getGroupAction(pCxt, pSelect);
|
||||
pWindow->node.requireDataOrder =
|
||||
|
|
|
@ -274,7 +274,7 @@ static bool stbSplNeedSplitWindow(bool streamQuery, SLogicNode* pNode) {
|
|||
}
|
||||
}
|
||||
|
||||
if (WINDOW_TYPE_STATE == pWindow->winType) {
|
||||
if (WINDOW_TYPE_STATE == pWindow->winType || WINDOW_TYPE_COUNT == pWindow->winType) {
|
||||
if (!streamQuery) {
|
||||
return stbSplHasMultiTbScan(streamQuery, pNode);
|
||||
} else {
|
||||
|
|
|
@ -715,6 +715,7 @@ int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SD
|
|||
uint64_t tId = req.taskId;
|
||||
int64_t rId = 0;
|
||||
int32_t eId = -1;
|
||||
pRes->source = req.source;
|
||||
|
||||
SQWMsg qwMsg = {.node = node, .msg = req.msg, .msgLen = req.phyLen, .connInfo = pMsg->info};
|
||||
QW_SCH_TASK_DLOG("processDelete start, node:%p, handle:%p, sql:%s", node, pMsg->info.handle, req.sql);
|
||||
|
|
|
@ -304,6 +304,7 @@ typedef struct SSchJob {
|
|||
SSchResInfo userRes;
|
||||
char *sql;
|
||||
SQueryProfileSummary summary;
|
||||
int8_t source;
|
||||
} SSchJob;
|
||||
|
||||
typedef struct SSchTaskCtx {
|
||||
|
|
|
@ -746,6 +746,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) {
|
|||
pJob->chkKillParam = pReq->chkKillParam;
|
||||
pJob->userRes.execFp = pReq->execFp;
|
||||
pJob->userRes.cbParam = pReq->cbParam;
|
||||
pJob->source = pReq->source;
|
||||
|
||||
if (pReq->pNodeList == NULL || taosArrayGetSize(pReq->pNodeList) <= 0) {
|
||||
qDebug("QID:0x%" PRIx64 " input exec nodeList is empty", pReq->pDag->queryId);
|
||||
|
|
|
@ -940,6 +940,10 @@ int32_t schAsyncSendMsg(SSchJob *pJob, SSchTask *pTask, SSchTrans *trans, SQuery
|
|||
SCH_ERR_JRET(schGenerateCallBackInfo(pJob, pTask, msg, msgSize, msgType, trans, isHb, &pMsgSendInfo));
|
||||
SCH_ERR_JRET(schUpdateSendTargetInfo(pMsgSendInfo, addr, pTask));
|
||||
|
||||
if (isHb && persistHandle && trans->pHandle == 0) {
|
||||
trans->pHandle = rpcAllocHandle();
|
||||
}
|
||||
|
||||
if (pJob && pTask) {
|
||||
SCH_TASK_DLOG("start to send %s msg to node[%d,%s,%d], pTrans:%p, pHandle:%p", TMSG_INFO(msgType), addr->nodeId,
|
||||
epSet->eps[epSet->inUse].fqdn, epSet->eps[epSet->inUse].port, trans->pTrans, trans->pHandle);
|
||||
|
@ -1086,6 +1090,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
|
|||
req.sqlLen = strlen(pJob->sql);
|
||||
req.sql = (char *)pJob->sql;
|
||||
req.msg = pTask->msg;
|
||||
req.source = pJob->source;
|
||||
msgSize = tSerializeSVDeleteReq(NULL, 0, &req);
|
||||
msg = taosMemoryCalloc(1, msgSize);
|
||||
if (NULL == msg) {
|
||||
|
|
|
@ -17,10 +17,14 @@
|
|||
#define _STREAM_BACKEDN_ROCKSDB_H_
|
||||
|
||||
#include "rocksdb/c.h"
|
||||
//#include "streamInt.h"
|
||||
// #include "streamInt.h"
|
||||
#include "streamState.h"
|
||||
#include "tcommon.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct SCfComparator {
|
||||
rocksdb_comparator_t** comp;
|
||||
int32_t numOfComp;
|
||||
|
@ -244,11 +248,6 @@ int32_t streamBackendDelInUseChkp(void* arg, int64_t chkpId);
|
|||
|
||||
int32_t taskDbBuildSnap(void* arg, SArray* pSnap);
|
||||
|
||||
// int32_t streamDefaultIter_rocksdb(SStreamState* pState, const void* start, const void* end, SArray* result);
|
||||
|
||||
// STaskDbWrapper* taskDbOpen(char* path, char* key, int64_t chkpId);
|
||||
// void taskDbDestroy(void* pDb, bool flush);
|
||||
|
||||
int32_t taskDbDoCheckpoint(void* arg, int64_t chkpId);
|
||||
|
||||
SBkdMgt* bkdMgtCreate(char* path);
|
||||
|
@ -258,4 +257,10 @@ int32_t bkdMgtDumpTo(SBkdMgt* bm, char* taskId, char* dname);
|
|||
void bkdMgtDestroy(SBkdMgt* bm);
|
||||
|
||||
int32_t taskDbGenChkpUploadData(void* arg, void* bkdMgt, int64_t chkpId, int8_t type, char** path, SArray* list);
|
||||
#endif
|
||||
|
||||
uint32_t nextPow2(uint32_t x);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -99,6 +99,7 @@ void streamRetryDispatchData(SStreamTask* pTask, int64_t waitDuration);
|
|||
int32_t streamDispatchStreamBlock(SStreamTask* pTask);
|
||||
void destroyDispatchMsg(SStreamDispatchReq* pReq, int32_t numOfVgroups);
|
||||
int32_t getNumOfDispatchBranch(SStreamTask* pTask);
|
||||
void clearBufferedDispatchMsg(SStreamTask* pTask);
|
||||
|
||||
int32_t streamProcessCheckpointBlock(SStreamTask* pTask, SStreamDataBlock* pBlock);
|
||||
SStreamDataBlock* createStreamBlockFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg);
|
||||
|
|
|
@@ -26,21 +26,21 @@ extern "C" {
typedef int32_t (*__state_trans_fn)(SStreamTask*);
typedef int32_t (*__state_trans_succ_fn)(SStreamTask*);

typedef struct SAttachedEventInfo {
typedef struct SFutureHandleEventInfo {
  ETaskStatus status;      // required status that this event can be handled
  EStreamTaskEvent event;  // the delayed handled event
  void* pParam;
  void* pFn;
} SAttachedEventInfo;
  __state_trans_user_fn callBackFn;
} SFutureHandleEventInfo;

typedef struct STaskStateTrans {
  bool autoInvokeEndFn;
  SStreamTaskState state;
  EStreamTaskEvent event;
  SStreamTaskState next;
  __state_trans_fn pAction;
  __state_trans_succ_fn pSuccAction;
  SAttachedEventInfo attachEvent;
  bool autoInvokeEndFn;
  SStreamTaskState state;
  EStreamTaskEvent event;
  SStreamTaskState next;
  __state_trans_fn pAction;
  __state_trans_succ_fn pSuccAction;
  SFutureHandleEventInfo attachEvent;
} STaskStateTrans;

struct SStreamTaskSM {
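The pParam and callBackFn fields added to the waiting-list entry are what the asynchronous event API introduced later in this commit (streamTaskHandleEventAsync) carries along, so the callback can still run once a delayed event is finally handled. A rough usage sketch, assuming a valid task pointer; the callback and wrapper names here are hypothetical, and the exact typedef of __state_trans_user_fn is not shown in this hunk, but the callbacks added elsewhere in the commit take (SStreamTask*, void*) and return int32_t:

```c
/* Illustrative only: hand a completion callback to the state machine.
 * If the DROPPING transition cannot run immediately, the callback and its
 * parameter ride along in the SFutureHandleEventInfo waiting-list entry. */
static int32_t onDropHandled(SStreamTask* pTask, void* param) {
  /* invoked after the transition has been applied, possibly from the waiting list */
  return 0;
}

static void requestDrop(SStreamTask* pTask) {
  streamTaskHandleEventAsync(pTask->status.pSM, TASK_EVENT_DROPPING, onDropHandled, NULL);
}
```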
@ -906,6 +906,7 @@ int32_t chkpMayDelObsolete(void* arg, int64_t chkpId, char* path) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef BUILD_NO_CALL
|
||||
static int32_t chkpIdComp(const void* a, const void* b) {
|
||||
int64_t x = *(int64_t*)a;
|
||||
int64_t y = *(int64_t*)b;
|
||||
|
@ -964,6 +965,7 @@ int32_t streamBackendLoadCheckpointInfo(void* arg) {
|
|||
taosMemoryFree(chkpPath);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef BUILD_NO_CALL
|
||||
int32_t chkpGetAllDbCfHandle(SStreamMeta* pMeta, rocksdb_column_family_handle_t*** ppHandle, SArray* refs) {
|
||||
|
@ -2788,7 +2790,6 @@ SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState) {
|
|||
STREAM_STATE_DEL_ROCKSDB(pState, "state", &maxStateKey);
|
||||
return pCur;
|
||||
}
|
||||
#ifdef BUILD_NO_CALL
|
||||
SStreamStateCur* streamStateGetCur_rocksdb(SStreamState* pState, const SWinKey* key) {
|
||||
stDebug("streamStateGetCur_rocksdb");
|
||||
STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend;
|
||||
|
@ -2838,7 +2839,6 @@ int32_t streamStateFuncDel_rocksdb(SStreamState* pState, const STupleKey* key) {
|
|||
STREAM_STATE_DEL_ROCKSDB(pState, "func", key);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
// session cf
|
||||
int32_t streamStateSessionPut_rocksdb(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen) {
|
||||
|
@ -3432,7 +3432,6 @@ int32_t streamStateStateAddIfNotExist_rocksdb(SStreamState* pState, SSessionKey*
|
|||
SSessionKey tmpKey = *key;
|
||||
int32_t valSize = *pVLen;
|
||||
void* tmp = taosMemoryMalloc(valSize);
|
||||
// tdbRealloc(NULL, valSize);
|
||||
if (!tmp) {
|
||||
return -1;
|
||||
}
|
||||
|
@ -3506,13 +3505,11 @@ int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, voi
|
|||
return code;
|
||||
}
|
||||
|
||||
#ifdef BUILD_NO_CALL
|
||||
int32_t streamDefaultPut_rocksdb(SStreamState* pState, const void* key, void* pVal, int32_t pVLen) {
|
||||
int code = 0;
|
||||
STREAM_STATE_PUT_ROCKSDB(pState, "default", key, pVal, pVLen);
|
||||
return code;
|
||||
}
|
||||
#endif
|
||||
int32_t streamDefaultGet_rocksdb(SStreamState* pState, const void* key, void** pVal, int32_t* pVLen) {
|
||||
int code = 0;
|
||||
STREAM_STATE_GET_ROCKSDB(pState, "default", key, pVal, pVLen);
|
||||
|
@ -3535,10 +3532,10 @@ int32_t streamDefaultIterGet_rocksdb(SStreamState* pState, const void* start, co
|
|||
if (pIter == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
size_t klen = 0;
|
||||
rocksdb_iter_seek(pIter, start, strlen(start));
|
||||
while (rocksdb_iter_valid(pIter)) {
|
||||
const char* key = rocksdb_iter_key(pIter, NULL);
|
||||
const char* key = rocksdb_iter_key(pIter, &klen);
|
||||
int32_t vlen = 0;
|
||||
const char* vval = rocksdb_iter_value(pIter, (size_t*)&vlen);
|
||||
char* val = NULL;
|
||||
|
@ -3700,6 +3697,8 @@ uint32_t nextPow2(uint32_t x) {
|
|||
x = x | (x >> 16);
|
||||
return x + 1;
|
||||
}
|
||||
|
||||
#ifdef BUILD_NO_CALL
|
||||
int32_t copyFiles(const char* src, const char* dst) {
|
||||
int32_t code = 0;
|
||||
// opt later, just hard link
|
||||
|
@ -3739,6 +3738,7 @@ _err:
|
|||
taosCloseDir(&pDir);
|
||||
return code >= 0 ? 0 : -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t isBkdDataMeta(char* name, int32_t len) {
|
||||
const char* pCurrent = "CURRENT";
|
||||
|
|
|
@@ -315,6 +315,16 @@ int32_t getNumOfDispatchBranch(SStreamTask* pTask) {
      : taosArrayGetSize(pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos);
}

void clearBufferedDispatchMsg(SStreamTask* pTask) {
  SDispatchMsgInfo* pMsgInfo = &pTask->msgInfo;
  if (pMsgInfo->pData != NULL) {
    destroyDispatchMsg(pMsgInfo->pData, getNumOfDispatchBranch(pTask));
  }

  pMsgInfo->pData = NULL;
  pMsgInfo->dispatchMsgType = 0;
}

static int32_t doBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* pData) {
  int32_t code = 0;
  int32_t numOfBlocks = taosArrayGetSize(pData->blocks);
@ -678,8 +688,7 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
|
|||
// todo deal with only partially success dispatch case
|
||||
atomic_store_32(&pTask->outputInfo.shuffleDispatcher.waitingRspCnt, 0);
|
||||
if (terrno == TSDB_CODE_APP_IS_STOPPING) { // in case of this error, do not retry anymore
|
||||
destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
|
||||
pTask->msgInfo.pData = NULL;
|
||||
clearBufferedDispatchMsg(pTask);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -740,6 +749,8 @@ int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask) {
|
|||
|
||||
int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq) {
|
||||
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
|
||||
ASSERT(dataStrLen > 0);
|
||||
|
||||
void* buf = taosMemoryCalloc(1, dataStrLen);
|
||||
if (buf == NULL) return -1;
|
||||
|
||||
|
@ -936,15 +947,12 @@ void streamClearChkptReadyMsg(SStreamTask* pTask) {
|
|||
// this message has been sent successfully, let's try next one.
|
||||
static int32_t handleDispatchSuccessRsp(SStreamTask* pTask, int32_t downstreamId) {
|
||||
stDebug("s-task:%s destroy dispatch msg:%p", pTask->id.idStr, pTask->msgInfo.pData);
|
||||
destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
|
||||
|
||||
bool delayDispatch = (pTask->msgInfo.dispatchMsgType == STREAM_INPUT__CHECKPOINT_TRIGGER);
|
||||
if (delayDispatch) {
|
||||
pTask->chkInfo.dispatchCheckpointTrigger = true;
|
||||
}
|
||||
|
||||
pTask->msgInfo.pData = NULL;
|
||||
pTask->msgInfo.dispatchMsgType = 0;
|
||||
clearBufferedDispatchMsg(pTask);
|
||||
|
||||
int64_t el = taosGetTimestampMs() - pTask->msgInfo.startTs;
|
||||
|
||||
|
@ -1084,7 +1092,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
|
|||
} else { // this message has been sent successfully, let's try next one.
|
||||
pTask->msgInfo.retryCount = 0;
|
||||
|
||||
// transtate msg has been sent to downstream successfully. let's transfer the fill-history task state
|
||||
// trans-state msg has been sent to downstream successfully. let's transfer the fill-history task state
|
||||
if (pTask->msgInfo.dispatchMsgType == STREAM_INPUT__TRANS_STATE) {
|
||||
stDebug("s-task:%s dispatch transtate msgId:%d to downstream successfully, start to transfer state", id, msgId);
|
||||
ASSERT(pTask->info.fillHistory == 1);
|
||||
|
@ -1093,6 +1101,8 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
|
|||
if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens
|
||||
}
|
||||
|
||||
clearBufferedDispatchMsg(pTask);
|
||||
|
||||
// now ready for next data output
|
||||
atomic_store_8(&pTask->outputq.status, TASK_OUTPUT_STATUS__NORMAL);
|
||||
} else {
|
@ -353,7 +353,8 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
|
|||
if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||
ASSERT(status == TASK_STATUS__HALT || status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP);
|
||||
} else {
|
||||
ASSERT(status == TASK_STATUS__READY || status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP);
|
||||
ASSERT(status == TASK_STATUS__READY || status == TASK_STATUS__PAUSE || status == TASK_STATUS__DROPPING ||
|
||||
status == TASK_STATUS__STOP);
|
||||
int32_t code = streamTaskHandleEvent(pStreamTask->status.pSM, TASK_EVENT_HALT);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
stError("s-task:%s halt stream task:%s failed, code:%s not transfer state to stream task", id,
|
||||
|
@@ -410,6 +411,12 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
  return TSDB_CODE_SUCCESS;
}

static int32_t haltCallback(SStreamTask* pTask, void* param) {
  streamTaskOpenAllUpstreamInput(pTask);
  streamTaskSendCheckpointReq(pTask);
  return TSDB_CODE_SUCCESS;
}

int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
  int32_t code = TSDB_CODE_SUCCESS;
  SStreamMeta* pMeta = pTask->pMeta;
@ -419,11 +426,12 @@ int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
|
|||
int32_t level = pTask->info.taskLevel;
|
||||
if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) { // do transfer task operator states.
|
||||
code = streamDoTransferStateToStreamTask(pTask);
|
||||
} else { // no state transfer for sink tasks, and drop fill-history task, followed by opening inputQ of sink task.
|
||||
} else {
|
||||
// no state transfer for sink tasks, and drop fill-history task, followed by opening inputQ of sink task.
|
||||
SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId);
|
||||
if (pStreamTask != NULL) {
|
||||
// halt the related stream sink task
|
||||
code = streamTaskHandleEvent(pStreamTask->status.pSM, TASK_EVENT_HALT);
|
||||
code = streamTaskHandleEventAsync(pStreamTask->status.pSM, TASK_EVENT_HALT, haltCallback, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
stError("s-task:%s halt stream task:%s failed, code:%s not transfer state to stream task", pTask->id.idStr,
|
||||
pStreamTask->id.idStr, tstrerror(code));
|
||||
|
@ -432,9 +440,6 @@ int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
|
|||
} else {
|
||||
stDebug("s-task:%s halt by related fill-history task:%s", pStreamTask->id.idStr, pTask->id.idStr);
|
||||
}
|
||||
|
||||
streamTaskOpenAllUpstreamInput(pStreamTask);
|
||||
streamTaskSendCheckpointReq(pStreamTask);
|
||||
streamMetaReleaseTask(pMeta, pStreamTask);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -70,7 +70,7 @@ static void streamMetaEnvInit() {
|
|||
streamTimerInit();
|
||||
}
|
||||
|
||||
void streamMetaInit() { taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit);}
|
||||
void streamMetaInit() { taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit); }
|
||||
|
||||
void streamMetaCleanup() {
|
||||
taosCloseRef(streamBackendId);
|
||||
|
@ -669,6 +669,13 @@ static void doRemoveIdFromList(SStreamMeta* pMeta, int32_t num, SStreamTaskId* i
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t streamTaskSendTransSuccessMsg(SStreamTask* pTask, void* param) {
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||
streamTaskSendCheckpointSourceRsp(pTask);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) {
|
||||
SStreamTask* pTask = NULL;
|
||||
|
||||
|
@ -687,7 +694,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
|
|||
}
|
||||
|
||||
// handle the dropping event
|
||||
streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_DROPPING);
|
||||
streamTaskHandleEventAsync(pTask->status.pSM, TASK_EVENT_DROPPING, streamTaskSendTransSuccessMsg, NULL);
|
||||
} else {
|
||||
stDebug("vgId:%d failed to find the task:0x%x, it may be dropped already", pMeta->vgId, taskId);
|
||||
streamMetaWUnLock(pMeta);
|
||||
|
@ -1104,14 +1111,14 @@ static int32_t metaHeartbeatToMnodeImpl(SStreamMeta* pMeta) {
|
|||
.inputQUsed = SIZE_IN_MiB(streamQueueGetItemSize((*pTask)->inputq.queue)),
|
||||
};
|
||||
|
||||
entry.inputRate = entry.inputQUsed * 100.0 / (2*STREAM_TASK_QUEUE_CAPACITY_IN_SIZE);
|
||||
entry.inputRate = entry.inputQUsed * 100.0 / (2 * STREAM_TASK_QUEUE_CAPACITY_IN_SIZE);
|
||||
if ((*pTask)->info.taskLevel == TASK_LEVEL__SINK) {
|
||||
entry.sinkQuota = (*pTask)->outputInfo.pTokenBucket->quotaRate;
|
||||
entry.sinkDataSize = SIZE_IN_MiB((*pTask)->execInfo.sink.dataSize);
|
||||
}
|
||||
|
||||
if ((*pTask)->chkInfo.checkpointingId != 0) {
|
||||
entry.checkpointFailed = ((*pTask)->chkInfo.failedId >= (*pTask)->chkInfo.checkpointingId)? 1:0;
|
||||
entry.checkpointFailed = ((*pTask)->chkInfo.failedId >= (*pTask)->chkInfo.checkpointingId) ? 1 : 0;
|
||||
entry.checkpointId = (*pTask)->chkInfo.checkpointingId;
|
||||
entry.chkpointTransId = (*pTask)->chkInfo.transId;
|
||||
|
||||
|
@ -1172,7 +1179,7 @@ static int32_t metaHeartbeatToMnodeImpl(SStreamMeta* pMeta) {
|
|||
stDebug("vgId:%d no tasks and no mnd epset, not send stream hb to mnode", pMeta->vgId);
|
||||
}
|
||||
|
||||
_end:
|
||||
_end:
|
||||
streamMetaClearHbMsg(&hbMsg);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -1304,28 +1311,28 @@ void streamMetaResetStartInfo(STaskStartInfo* pStartInfo) {
|
|||
}
|
||||
|
||||
void streamMetaRLock(SStreamMeta* pMeta) {
|
||||
// stTrace("vgId:%d meta-rlock", pMeta->vgId);
|
||||
// stTrace("vgId:%d meta-rlock", pMeta->vgId);
|
||||
taosThreadRwlockRdlock(&pMeta->lock);
|
||||
}
|
||||
|
||||
void streamMetaRUnLock(SStreamMeta* pMeta) {
|
||||
// stTrace("vgId:%d meta-runlock", pMeta->vgId);
|
||||
// stTrace("vgId:%d meta-runlock", pMeta->vgId);
|
||||
int32_t code = taosThreadRwlockUnlock(&pMeta->lock);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
stError("vgId:%d meta-runlock failed, code:%d", pMeta->vgId, code);
|
||||
} else {
|
||||
// stTrace("vgId:%d meta-runlock completed", pMeta->vgId);
|
||||
// stTrace("vgId:%d meta-runlock completed", pMeta->vgId);
|
||||
}
|
||||
}
|
||||
|
||||
void streamMetaWLock(SStreamMeta* pMeta) {
|
||||
// stTrace("vgId:%d meta-wlock", pMeta->vgId);
|
||||
// stTrace("vgId:%d meta-wlock", pMeta->vgId);
|
||||
taosThreadRwlockWrlock(&pMeta->lock);
|
||||
// stTrace("vgId:%d meta-wlock completed", pMeta->vgId);
|
||||
// stTrace("vgId:%d meta-wlock completed", pMeta->vgId);
|
||||
}
|
||||
|
||||
void streamMetaWUnLock(SStreamMeta* pMeta) {
|
||||
// stTrace("vgId:%d meta-wunlock", pMeta->vgId);
|
||||
// stTrace("vgId:%d meta-wunlock", pMeta->vgId);
|
||||
taosThreadRwlockUnlock(&pMeta->lock);
|
||||
}
|
||||
|
||||
|
@ -1395,7 +1402,7 @@ void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader)
|
|||
pMeta->sendMsgBeforeClosing = true;
|
||||
}
|
||||
|
||||
pMeta->role = (isLeader)? NODE_ROLE_LEADER:NODE_ROLE_FOLLOWER;
|
||||
pMeta->role = (isLeader) ? NODE_ROLE_LEADER : NODE_ROLE_FOLLOWER;
|
||||
streamMetaWUnLock(pMeta);
|
||||
|
||||
if (isLeader) {
|
||||
|
@ -1531,8 +1538,8 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
|
|||
|
||||
bool streamMetaAllTasksReady(const SStreamMeta* pMeta) {
|
||||
int32_t num = taosArrayGetSize(pMeta->pTaskList);
|
||||
for(int32_t i = 0; i < num; ++i) {
|
||||
STaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
STaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
|
||||
SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, pTaskId, sizeof(*pTaskId));
|
||||
if (ppTask == NULL) {
|
||||
continue;
|
||||
|
@ -1633,7 +1640,7 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3
|
|||
pStartInfo->elapsedTime = (pStartInfo->startTs != 0) ? pStartInfo->readyTs - pStartInfo->startTs : 0;
|
||||
|
||||
stDebug("vgId:%d all %d task(s) check downstream completed, last completed task:0x%x (succ:%d) startTs:%" PRId64
|
||||
", readyTs:%" PRId64 " total elapsed time:%.2fs",
|
||||
", readyTs:%" PRId64 " total elapsed time:%.2fs",
|
||||
pMeta->vgId, numOfTotal, taskId, ready, pStartInfo->startTs, pStartInfo->readyTs,
|
||||
pStartInfo->elapsedTime / 1000.0);
|
||||
|
||||
|
|
|
@ -156,6 +156,7 @@ int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey,
|
|||
(*pVal) = pPos;
|
||||
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
|
||||
pPos->beUsed = true;
|
||||
pPos->beFlushed = false;
|
||||
*pKey = *pDestWinKey;
|
||||
goto _end;
|
||||
}
|
||||
|
@ -167,6 +168,7 @@ int32_t getSessionWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey,
|
|||
(*pVal) = pPos;
|
||||
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
|
||||
pPos->beUsed = true;
|
||||
pPos->beFlushed = false;
|
||||
*pKey = *pDestWinKey;
|
||||
goto _end;
|
||||
}
|
||||
|
@ -380,6 +382,14 @@ static SStreamStateCur* seekKeyCurrentPrev_buff(SStreamFileState* pFileState, co
|
|||
(*pWins) = pWinStates;
|
||||
}
|
||||
|
||||
if (size > 0 && index == -1) {
|
||||
SRowBuffPos* pPos = taosArrayGetP(pWinStates, 0);
|
||||
SSessionKey* pWin = (SSessionKey*)pPos->pKey;
|
||||
if (pWinKey->win.skey == pWin->win.skey) {
|
||||
index = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (index >= 0) {
|
||||
pCur = createSessionStateCursor(pFileState);
|
||||
pCur->buffIndex = index;
|
||||
|
@ -387,6 +397,7 @@ static SStreamStateCur* seekKeyCurrentPrev_buff(SStreamFileState* pFileState, co
|
|||
*pIndex = index;
|
||||
}
|
||||
}
|
||||
|
||||
return pCur;
|
||||
}
|
||||
|
||||
|
@ -666,6 +677,7 @@ int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, ch
|
|||
(*pVal) = pPos;
|
||||
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
|
||||
pPos->beUsed = true;
|
||||
pPos->beFlushed = false;
|
||||
*key = *pDestWinKey;
|
||||
goto _end;
|
||||
}
|
||||
|
@ -679,6 +691,7 @@ int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, ch
|
|||
(*pVal) = pPos;
|
||||
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
|
||||
pPos->beUsed = true;
|
||||
pPos->beFlushed = false;
|
||||
*key = *pDestWinKey;
|
||||
goto _end;
|
||||
}
|
||||
|
@ -771,6 +784,7 @@ int32_t getCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, C
|
|||
(*pVal) = pPos;
|
||||
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
|
||||
pPos->beUsed = true;
|
||||
pPos->beFlushed = false;
|
||||
*pWinKey = *pDestWinKey;
|
||||
goto _end;
|
||||
}
|
||||
|
@ -799,6 +813,7 @@ int32_t getCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, C
|
|||
(*pVal) = pPos;
|
||||
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
|
||||
pPos->beUsed = true;
|
||||
pPos->beFlushed = false;
|
||||
*pWinKey = *pDestWinKey;
|
||||
goto _end;
|
||||
}
|
||||
|
|
|
@ -385,7 +385,7 @@ int32_t streamTaskOnScanhistoryTaskReady(SStreamTask* pTask) {
|
|||
|
||||
void doProcessDownstreamReadyRsp(SStreamTask* pTask) {
|
||||
EStreamTaskEvent event = (pTask->info.fillHistory == 0) ? TASK_EVENT_INIT : TASK_EVENT_INIT_SCANHIST;
|
||||
streamTaskOnHandleEventSuccess(pTask->status.pSM, event);
|
||||
streamTaskOnHandleEventSuccess(pTask->status.pSM, event, NULL, NULL);
|
||||
|
||||
int64_t initTs = pTask->execInfo.init;
|
||||
int64_t startTs = pTask->execInfo.start;
|
||||
|
|
|
@ -39,7 +39,7 @@ static int32_t doUpdateTaskEpset(SStreamTask* pTask, int32_t nodeId, SEpSet* pEp
|
|||
stDebug("s-task:0x%x (vgId:%d) self node epset is updated %s", pTask->id.taskId, nodeId, buf);
|
||||
}
|
||||
|
||||
// check for the dispath info and the upstream task info
|
||||
// check for the dispatch info and the upstream task info
|
||||
int32_t level = pTask->info.taskLevel;
|
||||
if (level == TASK_LEVEL__SOURCE) {
|
||||
streamTaskUpdateDownstreamInfo(pTask, nodeId, pEpSet);
|
||||
|
@ -412,9 +412,7 @@ void tFreeStreamTask(SStreamTask* pTask) {
|
|||
pTask->pReadyMsgList = taosArrayDestroy(pTask->pReadyMsgList);
|
||||
|
||||
if (pTask->msgInfo.pData != NULL) {
|
||||
destroyDispatchMsg(pTask->msgInfo.pData, getNumOfDispatchBranch(pTask));
|
||||
pTask->msgInfo.pData = NULL;
|
||||
pTask->msgInfo.dispatchMsgType = 0;
|
||||
clearBufferedDispatchMsg(pTask);
|
||||
}
|
||||
|
||||
if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
|
||||
|
@ -624,6 +622,7 @@ void streamTaskSetFixedDownstreamInfo(SStreamTask* pTask, const SStreamTask* pDo
|
|||
void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet) {
|
||||
char buf[512] = {0};
|
||||
EPSET_TO_STR(pEpSet, buf);
|
||||
int32_t id = pTask->id.taskId;
|
||||
|
||||
int8_t type = pTask->outputInfo.type;
|
||||
if (type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
|
||||
|
@ -635,8 +634,8 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE
|
|||
|
||||
if (pVgInfo->vgId == nodeId) {
|
||||
epsetAssign(&pVgInfo->epSet, pEpSet);
|
||||
stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpset:%s", pTask->id.taskId,
|
||||
pVgInfo->taskId, nodeId, buf);
|
||||
stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpset:%s", id, pVgInfo->taskId, nodeId,
|
||||
buf);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -644,11 +643,9 @@ void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SE
|
|||
STaskDispatcherFixed* pDispatcher = &pTask->outputInfo.fixedDispatcher;
|
||||
if (pDispatcher->nodeId == nodeId) {
|
||||
epsetAssign(&pDispatcher->epSet, pEpSet);
|
||||
stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpSet:%s", pTask->id.taskId,
|
||||
pDispatcher->taskId, nodeId, buf);
|
||||
stDebug("s-task:0x%x update the dispatch info, task:0x%x(nodeId:%d) newEpset:%s", id, pDispatcher->taskId, nodeId,
|
||||
buf);
|
||||
}
|
||||
} else {
|
||||
// do nothing
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -766,21 +763,13 @@ int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask) {
|
|||
return status;
|
||||
}
|
||||
|
||||
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt, bool metaLock) {
|
||||
if (pTask == NULL) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt) {
|
||||
SStreamMeta* pMeta = pTask->pMeta;
|
||||
STaskId sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId};
|
||||
if (pTask->info.fillHistory == 0) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (metaLock) {
|
||||
streamMetaWLock(pMeta);
|
||||
}
|
||||
|
||||
SStreamTask** ppStreamTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &sTaskId, sizeof(sTaskId));
|
||||
if (ppStreamTask != NULL) {
|
||||
stDebug("s-task:%s clear the related stream task:0x%x attr to fill-history task", pTask->id.idStr,
|
||||
|
@ -798,10 +787,6 @@ int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt, bool
|
|||
taosThreadMutexUnlock(&(*ppStreamTask)->lock);
|
||||
}
|
||||
|
||||
if (metaLock) {
|
||||
streamMetaWUnLock(pMeta);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -869,8 +854,8 @@ void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc)
|
|||
pDst->chkpointTransId = pSrc->chkpointTransId;
|
||||
}
|
||||
|
||||
void streamTaskPause(SStreamMeta* pMeta, SStreamTask* pTask) {
|
||||
streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_PAUSE);
|
||||
static int32_t taskPauseCallback(SStreamTask* pTask, void* param) {
|
||||
SStreamMeta* pMeta = pTask->pMeta;
|
||||
|
||||
int32_t num = atomic_add_fetch_32(&pMeta->numOfPausedTasks, 1);
|
||||
stInfo("vgId:%d s-task:%s pause stream task. pause task num:%d", pMeta->vgId, pTask->id.idStr, num);
|
||||
|
@ -882,24 +867,24 @@ void streamTaskPause(SStreamMeta* pMeta, SStreamTask* pTask) {
|
|||
}
|
||||
|
||||
stDebug("vgId:%d s-task:%s set pause flag and pause task", pMeta->vgId, pTask->id.idStr);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void streamTaskPause(SStreamMeta* pMeta, SStreamTask* pTask) {
|
||||
streamTaskHandleEventAsync(pTask->status.pSM, TASK_EVENT_PAUSE, taskPauseCallback, NULL);
|
||||
}
|
||||
|
||||
void streamTaskResume(SStreamTask* pTask) {
|
||||
SStreamTaskState prevState = *streamTaskGetStatus(pTask);
|
||||
SStreamMeta* pMeta = pTask->pMeta;
|
||||
|
||||
if (prevState.state == TASK_STATUS__PAUSE || prevState.state == TASK_STATUS__HALT) {
|
||||
streamTaskRestoreStatus(pTask);
|
||||
|
||||
char* pNew = streamTaskGetStatus(pTask)->name;
|
||||
if (prevState.state == TASK_STATUS__PAUSE) {
|
||||
int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1);
|
||||
stInfo("s-task:%s status:%s resume from %s, paused task(s):%d", pTask->id.idStr, pNew, prevState.name, num);
|
||||
} else {
|
||||
stInfo("s-task:%s status:%s resume from %s", pTask->id.idStr, pNew, prevState.name);
|
||||
}
|
||||
SStreamMeta* pMeta = pTask->pMeta;
|
||||
int32_t code = streamTaskRestoreStatus(pTask);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
char* pNew = streamTaskGetStatus(pTask)->name;
|
||||
int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1);
|
||||
stInfo("s-task:%s status:%s resume from %s, paused task(s):%d", pTask->id.idStr, pNew, prevState.name, num);
|
||||
} else {
|
||||
stDebug("s-task:%s status:%s not in pause/halt status, no need to resume", pTask->id.idStr, prevState.name);
|
||||
stInfo("s-task:%s status:%s no need to resume, paused task(s):%d", pTask->id.idStr, prevState.name, pMeta->numOfPausedTasks);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -59,20 +59,23 @@ static int32_t streamTaskInitStatus(SStreamTask* pTask);
static int32_t streamTaskKeepCurrentVerInWal(SStreamTask* pTask);
static int32_t initStateTransferTable();
static void doInitStateTransferTable(void);
static int32_t streamTaskSendTransSuccessMsg(SStreamTask* pTask);

static STaskStateTrans createStateTransform(ETaskStatus current, ETaskStatus next, EStreamTaskEvent event,
                                            __state_trans_fn fn, __state_trans_succ_fn succFn,
                                            SAttachedEventInfo* pEventInfo, bool autoInvoke);
                                            SFutureHandleEventInfo* pEventInfo);

static int32_t dummyFn(SStreamTask* UNUSED_PARAM(p)) { return TSDB_CODE_SUCCESS; }

static int32_t attachEvent(SStreamTask* pTask, SAttachedEventInfo* pEvtInfo) {
static int32_t attachWaitedEvent(SStreamTask* pTask, SFutureHandleEventInfo* pEvtInfo) {
  char* p = streamTaskGetStatus(pTask)->name;

  stDebug("s-task:%s status:%s attach event:%s required status:%s, since not allowed to handle it", pTask->id.idStr, p,
          GET_EVT_NAME(pEvtInfo->event), StreamTaskStatusList[pEvtInfo->status].name);
  taosArrayPush(pTask->status.pSM->pWaitingEventList, pEvtInfo);

  SArray* pList = pTask->status.pSM->pWaitingEventList;
  taosArrayPush(pList, pEvtInfo);

  stDebug("s-task:%s add into waiting list, total waiting events:%d", pTask->id.idStr, (int32_t)taosArrayGetSize(pList));
  return 0;
}
@ -85,18 +88,6 @@ int32_t streamTaskInitStatus(SStreamTask* pTask) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t streamTaskDoCheckpoint(SStreamTask* pTask) {
|
||||
stDebug("s-task:%s start to do checkpoint", pTask->id.idStr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamTaskSendTransSuccessMsg(SStreamTask* pTask) {
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||
streamTaskSendCheckpointSourceRsp(pTask);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamTaskKeepCurrentVerInWal(SStreamTask* pTask) {
|
||||
if (!HAS_RELATED_FILLHISTORY_TASK(pTask)) {
|
||||
stError("s-task:%s no related fill-history task, since it may have been dropped already", pTask->id.idStr);
|
||||
|
@ -170,9 +161,11 @@ static int32_t doHandleWaitingEvent(SStreamTaskSM* pSM, const char* pEventName,
|
|||
stDebug("s-task:%s handle event:%s completed, elapsed time:%" PRId64 "ms state:%s -> %s", pTask->id.idStr,
|
||||
pEventName, el, pSM->prev.state.name, pSM->current.name);
|
||||
|
||||
SAttachedEventInfo* pEvtInfo = taosArrayGet(pSM->pWaitingEventList, 0);
|
||||
ASSERT(taosArrayGetSize(pSM->pWaitingEventList) == 1);
|
||||
|
||||
// OK, let's handle the attached event, since the task has reached the required status now
|
||||
SFutureHandleEventInfo* pEvtInfo = taosArrayGet(pSM->pWaitingEventList, 0);
|
||||
|
||||
// OK, let's handle the waiting event, since the task has reached the required status now
|
||||
if (pSM->current.state == pEvtInfo->status) {
|
||||
stDebug("s-task:%s handle the event:%s in waiting list, state:%s", pTask->id.idStr,
|
||||
GET_EVT_NAME(pEvtInfo->event), pSM->current.name);
|
||||
|
@ -189,7 +182,7 @@ static int32_t doHandleWaitingEvent(SStreamTaskSM* pSM, const char* pEventName,
|
|||
|
||||
code = pNextTrans->pAction(pSM->pTask);
|
||||
if (pNextTrans->autoInvokeEndFn) {
|
||||
return streamTaskOnHandleEventSuccess(pSM, pNextTrans->event);
|
||||
return streamTaskOnHandleEventSuccess(pSM, pNextTrans->event, pEvtInfo->callBackFn, pEvtInfo->pParam);
|
||||
} else {
|
||||
return code;
|
||||
}
|
||||
|
@ -203,30 +196,61 @@ static int32_t doHandleWaitingEvent(SStreamTaskSM* pSM, const char* pEventName,
|
|||
return code;
|
||||
}
|
||||
|
||||
void streamTaskRestoreStatus(SStreamTask* pTask) {
|
||||
static int32_t removeEventInWaitingList(SStreamTask* pTask, EStreamTaskEvent event) {
|
||||
SStreamTaskSM* pSM = pTask->status.pSM;
|
||||
|
||||
bool removed = false;
|
||||
taosThreadMutexLock(&pTask->lock);
|
||||
|
||||
ASSERT(pSM->pActiveTrans == NULL);
|
||||
ASSERT(pSM->current.state == TASK_STATUS__PAUSE || pSM->current.state == TASK_STATUS__HALT);
|
||||
int32_t num = taosArrayGetSize(pSM->pWaitingEventList);
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
SFutureHandleEventInfo* pInfo = taosArrayGet(pSM->pWaitingEventList, i);
|
||||
if (pInfo->event == event) {
|
||||
taosArrayRemove(pSM->pWaitingEventList, i);
|
||||
stDebug("s-task:%s pause event in waiting list not be handled yet, remove it from waiting list, remaining:%d",
|
||||
pTask->id.idStr, pInfo->event);
|
||||
removed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
SStreamTaskState state = pSM->current;
|
||||
pSM->current = pSM->prev.state;
|
||||
|
||||
pSM->prev.state = state;
|
||||
pSM->prev.evt = 0;
|
||||
|
||||
pSM->startTs = taosGetTimestampMs();
|
||||
|
||||
if (taosArrayGetSize(pSM->pWaitingEventList) > 0) {
|
||||
stDebug("s-task:%s restore status, %s -> %s, and then handle waiting event", pTask->id.idStr, pSM->prev.state.name, pSM->current.name);
|
||||
doHandleWaitingEvent(pSM, "restore-pause/halt", pTask);
|
||||
} else {
|
||||
stDebug("s-task:%s restore status, %s -> %s", pTask->id.idStr, pSM->prev.state.name, pSM->current.name);
|
||||
if (!removed) {
|
||||
stDebug("s-task:%s failed to remove event:%s in waiting list", pTask->id.idStr, StreamTaskEventList[event].name);
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t streamTaskRestoreStatus(SStreamTask* pTask) {
|
||||
SStreamTaskSM* pSM = pTask->status.pSM;
|
||||
int32_t code = 0;
|
||||
|
||||
taosThreadMutexLock(&pTask->lock);
|
||||
|
||||
if (pSM->current.state == TASK_STATUS__PAUSE && pSM->pActiveTrans == NULL) {
|
||||
SStreamTaskState state = pSM->current;
|
||||
pSM->current = pSM->prev.state;
|
||||
|
||||
pSM->prev.state = state;
|
||||
pSM->prev.evt = 0;
|
||||
|
||||
pSM->startTs = taosGetTimestampMs();
|
||||
|
||||
if (taosArrayGetSize(pSM->pWaitingEventList) > 0) {
|
||||
stDebug("s-task:%s restore status, %s -> %s, and then handle waiting event", pTask->id.idStr,
|
||||
pSM->prev.state.name, pSM->current.name);
|
||||
doHandleWaitingEvent(pSM, "restore-pause/halt", pTask);
|
||||
} else {
|
||||
stDebug("s-task:%s restore status, %s -> %s", pTask->id.idStr, pSM->prev.state.name, pSM->current.name);
|
||||
}
|
||||
} else {
|
||||
removeEventInWaitingList(pTask, TASK_EVENT_PAUSE);
|
||||
code = -1; // failed to restore the status
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
return code;
|
||||
}
|
||||
|
||||
SStreamTaskSM* streamCreateStateMachine(SStreamTask* pTask) {
|
||||
|
@ -242,7 +266,7 @@ SStreamTaskSM* streamCreateStateMachine(SStreamTask* pTask) {
|
|||
}
|
||||
|
||||
pSM->pTask = pTask;
|
||||
pSM->pWaitingEventList = taosArrayInit(4, sizeof(SAttachedEventInfo));
|
||||
pSM->pWaitingEventList = taosArrayInit(4, sizeof(SFutureHandleEventInfo));
|
||||
if (pSM->pWaitingEventList == NULL) {
|
||||
taosMemoryFree(pSM);
|
||||
|
||||
|
@ -273,7 +297,7 @@ static int32_t doHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event, STaskSt
|
|||
const char* id = pTask->id.idStr;
|
||||
|
||||
if (pTrans->attachEvent.event != 0) {
|
||||
attachEvent(pTask, &pTrans->attachEvent);
|
||||
attachWaitedEvent(pTask, &pTrans->attachEvent);
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
|
||||
while (1) {
|
||||
|
@ -303,7 +327,32 @@ static int32_t doHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event, STaskSt
|
|||
// todo handle error code;
|
||||
|
||||
if (pTrans->autoInvokeEndFn) {
|
||||
streamTaskOnHandleEventSuccess(pSM, event);
|
||||
streamTaskOnHandleEventSuccess(pSM, event, NULL, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t doHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, STaskStateTrans* pTrans, __state_trans_user_fn callbackFn, void* param) {
|
||||
SStreamTask* pTask = pSM->pTask;
|
||||
if (pTrans->attachEvent.event != 0) {
|
||||
SFutureHandleEventInfo info = pTrans->attachEvent;
|
||||
info.pParam = param;
|
||||
info.callBackFn = callbackFn;
|
||||
|
||||
attachWaitedEvent(pTask, &info);
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
} else { // override current active trans
|
||||
pSM->pActiveTrans = pTrans;
|
||||
pSM->startTs = taosGetTimestampMs();
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
|
||||
int32_t code = pTrans->pAction(pTask);
|
||||
// todo handle error code;
|
||||
|
||||
if (pTrans->autoInvokeEndFn) {
|
||||
streamTaskOnHandleEventSuccess(pSM, event, callbackFn, param);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -349,6 +398,45 @@ int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event) {
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t streamTaskHandleEventAsync(SStreamTaskSM* pSM, EStreamTaskEvent event, __state_trans_user_fn callbackFn, void* param) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SStreamTask* pTask = pSM->pTask;
|
||||
STaskStateTrans* pTrans = NULL;
|
||||
|
||||
while (1) {
|
||||
taosThreadMutexLock(&pTask->lock);
|
||||
|
||||
if (pSM->pActiveTrans != NULL && pSM->pActiveTrans->autoInvokeEndFn) {
|
||||
EStreamTaskEvent evt = pSM->pActiveTrans->event;
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
|
||||
stDebug("s-task:%s status:%s handling event:%s by some other thread, wait for 100ms and check if completed",
|
||||
pTask->id.idStr, pSM->current.name, GET_EVT_NAME(evt));
|
||||
taosMsleep(100);
|
||||
} else {
|
||||
// no active event trans exists, handle this event directly
|
||||
pTrans = streamTaskFindTransform(pSM->current.state, event);
|
||||
if (pTrans == NULL) {
|
||||
stDebug("s-task:%s failed to handle event:%s, status:%s", pTask->id.idStr, GET_EVT_NAME(event), pSM->current.name);
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
return TSDB_CODE_STREAM_INVALID_STATETRANS;
|
||||
}
|
||||
|
||||
if (pSM->pActiveTrans != NULL) {
|
||||
// currently in some state transfer procedure, not auto invoke transfer, quit from this procedure
|
||||
stDebug("s-task:%s event:%s handle procedure quit, status %s -> %s failed, handle event %s now",
|
||||
pTask->id.idStr, GET_EVT_NAME(pSM->pActiveTrans->event), pSM->current.name,
|
||||
pSM->pActiveTrans->next.name, GET_EVT_NAME(event));
|
||||
}
|
||||
|
||||
code = doHandleEventAsync(pSM, event, pTrans, callbackFn, param);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static void keepPrevInfo(SStreamTaskSM* pSM) {
|
||||
STaskStateTrans* pTrans = pSM->pActiveTrans;
|
||||
|
||||
|
@ -356,8 +444,9 @@ static void keepPrevInfo(SStreamTaskSM* pSM) {
|
|||
pSM->prev.evt = pTrans->event;
|
||||
}
|
||||
|
||||
int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event) {
|
||||
int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event, __state_trans_user_fn callbackFn, void* param) {
|
||||
SStreamTask* pTask = pSM->pTask;
|
||||
const char* id = pTask->id.idStr;
|
||||
|
||||
// do update the task status
|
||||
taosThreadMutexLock(&pTask->lock);
|
||||
|
@ -369,16 +458,16 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
|
|||
s == TASK_STATUS__UNINIT || s == TASK_STATUS__READY);
|
||||
|
||||
// the pSM->prev.evt may be 0, so print string is not appropriate.
|
||||
stDebug("s-task:%s event:%s handled failed, current status:%s, trigger event:%s", pTask->id.idStr,
|
||||
GET_EVT_NAME(event), pSM->current.name, GET_EVT_NAME(pSM->prev.evt));
|
||||
stDebug("s-task:%s event:%s handled failed, current status:%s, trigger event:%s", id, GET_EVT_NAME(event),
|
||||
pSM->current.name, GET_EVT_NAME(pSM->prev.evt));
|
||||
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
return TSDB_CODE_STREAM_INVALID_STATETRANS;
|
||||
}
|
||||
|
||||
if (pTrans->event != event) {
|
||||
stWarn("s-task:%s handle event:%s failed, current status:%s, active trans evt:%s", pTask->id.idStr,
|
||||
GET_EVT_NAME(event), pSM->current.name, GET_EVT_NAME(pTrans->event));
|
||||
stWarn("s-task:%s handle event:%s failed, current status:%s, active trans evt:%s", id, GET_EVT_NAME(event),
|
||||
pSM->current.name, GET_EVT_NAME(pTrans->event));
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
return TSDB_CODE_STREAM_INVALID_STATETRANS;
|
||||
}
|
||||
|
@ -388,16 +477,31 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
|
|||
pSM->current = pTrans->next;
|
||||
pSM->pActiveTrans = NULL;
|
||||
|
||||
// todo remove it
|
||||
// on success callback, add into lock if necessary, or maybe we should add an option for this?
|
||||
pTrans->pSuccAction(pTask);
|
||||
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
|
||||
// todo: add parameter to control lock
|
||||
// after handling the callback function assigned by invoker, go on handling the waiting tasks
|
||||
if (callbackFn != NULL) {
|
||||
stDebug("s-task:%s start to handle user-specified callback fn for event:%s", id, GET_EVT_NAME(pTrans->event));
|
||||
callbackFn(pSM->pTask, param);
|
||||
|
||||
stDebug("s-task:%s handle user-specified callback fn for event:%s completed", id, GET_EVT_NAME(pTrans->event));
|
||||
}
|
||||
|
||||
taosThreadMutexLock(&pTask->lock);
|
||||
|
||||
// tasks in waiting list
|
||||
if (taosArrayGetSize(pSM->pWaitingEventList) > 0) {
|
||||
doHandleWaitingEvent(pSM, GET_EVT_NAME(pTrans->event), pTask);
|
||||
} else {
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
|
||||
int64_t el = (taosGetTimestampMs() - pSM->startTs);
|
||||
stDebug("s-task:%s handle event:%s completed, elapsed time:%" PRId64 "ms state:%s -> %s", pTask->id.idStr,
|
||||
stDebug("s-task:%s handle event:%s completed, elapsed time:%" PRId64 "ms state:%s -> %s", id,
|
||||
GET_EVT_NAME(pTrans->event), el, pSM->prev.state.name, pSM->current.name);
|
||||
}
|
||||
|
||||
|
@ -453,7 +557,7 @@ void streamTaskSetStatusReady(SStreamTask* pTask) {
|
|||
}
|
||||
|
||||
STaskStateTrans createStateTransform(ETaskStatus current, ETaskStatus next, EStreamTaskEvent event, __state_trans_fn fn,
|
||||
__state_trans_succ_fn succFn, SAttachedEventInfo* pEventInfo, bool autoInvoke) {
|
||||
__state_trans_succ_fn succFn, SFutureHandleEventInfo* pEventInfo) {
|
||||
STaskStateTrans trans = {0};
|
||||
trans.state = StreamTaskStatusList[current];
|
||||
trans.next = StreamTaskStatusList[next];
|
||||
|
@ -468,7 +572,7 @@ STaskStateTrans createStateTransform(ETaskStatus current, ETaskStatus next, EStr
|
|||
|
||||
trans.pAction = (fn != NULL) ? fn : dummyFn;
|
||||
trans.pSuccAction = (succFn != NULL) ? succFn : dummyFn;
|
||||
trans.autoInvokeEndFn = autoInvoke;
|
||||
trans.autoInvokeEndFn = (fn == NULL);
|
||||
return trans;
|
||||
}
|
||||
|
||||
|
@ -482,93 +586,93 @@ void doInitStateTransferTable(void) {
|
|||
streamTaskSMTrans = taosArrayInit(8, sizeof(STaskStateTrans));
|
||||
|
||||
// initialization event handle
|
||||
STaskStateTrans trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__READY, TASK_EVENT_INIT, streamTaskInitStatus, streamTaskOnNormalTaskReady, false, false);
|
||||
STaskStateTrans trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__READY, TASK_EVENT_INIT, streamTaskInitStatus, streamTaskOnNormalTaskReady, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__SCAN_HISTORY, TASK_EVENT_INIT_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, false, false);
|
||||
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__SCAN_HISTORY, TASK_EVENT_INIT_SCANHIST, streamTaskInitStatus, streamTaskOnScanhistoryTaskReady, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
|
||||
// scan-history related event
|
||||
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__READY, TASK_EVENT_SCANHIST_DONE, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
|
||||
// halt stream task, from other task status
|
||||
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
|
||||
SAttachedEventInfo info = {.status = TASK_STATUS__READY, .event = TASK_EVENT_HALT};
|
||||
SFutureHandleEventInfo info = {.status = TASK_STATUS__READY, .event = TASK_EVENT_HALT};
|
||||
|
||||
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info, true);
|
||||
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, &info);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__HALT, TASK_EVENT_HALT, NULL, streamTaskKeepCurrentVerInWal, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
|
||||
// checkpoint related event
|
||||
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, streamTaskDoCheckpoint, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, streamTaskDoCheckpoint, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__CK, TASK_EVENT_GEN_CHECKPOINT, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__READY, TASK_EVENT_CHECKPOINT_DONE, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__READY, TASK_EVENT_CHECKPOINT_DONE, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
|
||||
// pause & resume related event handle
|
||||
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
|
||||
info = (SAttachedEventInfo){.status = TASK_STATUS__READY, .event = TASK_EVENT_PAUSE};
|
||||
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true);
|
||||
info = (SFutureHandleEventInfo){.status = TASK_STATUS__READY, .event = TASK_EVENT_PAUSE};
|
||||
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info, true);
|
||||
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, &info);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
|
||||
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__PAUSE, TASK_EVENT_PAUSE, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__STOP, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__STOP, TASK_EVENT_PAUSE, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__DROPPING, TASK_EVENT_PAUSE, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__DROPPING, TASK_EVENT_PAUSE, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
|
||||
// resume is completed by restore status of state-machine
|
||||
|
||||
// stop related event
|
||||
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__STOP, TASK_EVENT_STOP, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
|
||||
// dropping related event
|
||||
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__READY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__DROPPING, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__UNINIT, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__STOP, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__SCAN_HISTORY, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__HALT, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__PAUSE, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, streamTaskSendTransSuccessMsg, NULL, NULL, true);
|
||||
trans = createStateTransform(TASK_STATUS__CK, TASK_STATUS__DROPPING, TASK_EVENT_DROPPING, NULL, NULL, NULL);
|
||||
taosArrayPush(streamTaskSMTrans, &trans);
|
||||
}
|
||||
//clang-format on
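For context on the transition table above: the `autoInvoke` parameter is gone from `createStateTransform`, `autoInvokeEndFn` is now derived from whether the transition supplies an action callback, and attached events use `SFutureHandleEventInfo`. A simplified, self-contained analogue of that construction (the enum values and struct fields below are stand-ins, not the real stream definitions):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in types mirroring the shape of the real state-machine entries. */
typedef enum { STATUS_UNINIT, STATUS_READY, STATUS_HALT } Status;
typedef enum { EVENT_INIT, EVENT_HALT } Event;
typedef int (*trans_fn)(void *pTask);

typedef struct {
  Status   state, next;
  Event    event;
  trans_fn pAction, pSuccAction;
  bool     autoInvokeEndFn;  /* no longer a parameter: derived from pAction == NULL */
} Trans;

static int dummyFn(void *pTask) { (void)pTask; return 0; }

static Trans createTrans(Status cur, Status next, Event ev, trans_fn fn, trans_fn succFn) {
  Trans t = {.state = cur, .next = next, .event = ev};
  t.pAction = (fn != NULL) ? fn : dummyFn;
  t.pSuccAction = (succFn != NULL) ? succFn : dummyFn;
  t.autoInvokeEndFn = (fn == NULL);  /* transitions without a real action auto-complete */
  return t;
}

int main(void) {
  Trans halt = createTrans(STATUS_READY, STATUS_HALT, EVENT_HALT, NULL, NULL);
  printf("auto invoke: %d\n", halt.autoInvokeEndFn);  /* prints 1 */
  return 0;
}
```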
@ -22,7 +22,7 @@
|
|||
#define DEFAULT_MAP_CAPACITY 131072
|
||||
#define DEFAULT_MAP_SIZE (DEFAULT_MAP_CAPACITY * 100)
|
||||
#define ROWS_PER_MILLISECOND 1
|
||||
#define MAX_NUM_SCALABLE_BF 100000
|
||||
#define MAX_NUM_SCALABLE_BF 64
|
||||
#define MIN_NUM_SCALABLE_BF 10
|
||||
#define DEFAULT_PREADD_BUCKET 1
|
||||
#define MAX_INTERVAL MILLISECOND_PER_MINUTE
|
||||
|
@@ -81,7 +81,9 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) {
static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) {
  if (watermark <= adjInterval) {
    watermark = TMAX(originInt / adjInterval, 1) * adjInterval;
  } else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) {
  }

  if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) {
    watermark = MAX_NUM_SCALABLE_BF * adjInterval;
  }
  return watermark;
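The hunk above drops the empty `else if` clamp and instead caps the watermark unconditionally after the minimum adjustment. A small standalone sketch of the resulting behavior, assuming the post-commit value of `MAX_NUM_SCALABLE_BF` (64) and restating the `TMAX` helper for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_NUM_SCALABLE_BF 64            /* value after this commit (was 100000) */
#define TMAX(a, b)          ((a) > (b) ? (a) : (b))

/* Sketch of the adjusted logic: raise a too-small watermark to at least one
 * interval, then unconditionally cap it at MAX_NUM_SCALABLE_BF intervals. */
static int64_t adjustWatermarkSketch(int64_t adjInterval, int64_t originInt, int64_t watermark) {
  if (watermark <= adjInterval) {
    watermark = TMAX(originInt / adjInterval, 1) * adjInterval;
  }
  if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) {
    watermark = MAX_NUM_SCALABLE_BF * adjInterval;
  }
  return watermark;
}

int main(void) {
  /* 20s interval: a 10min watermark stays as-is; a zero watermark is raised to one interval. */
  printf("%lld\n", (long long)adjustWatermarkSketch(20000, 20000, 600000));
  printf("%lld\n", (long long)adjustWatermarkSketch(20000, 20000, 0));
  return 0;
}
```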
@ -1,40 +1,104 @@
|
|||
|
||||
MESSAGE(STATUS "build stream unit test")
|
||||
|
||||
# GoogleTest requires at least C++11
|
||||
SET(CMAKE_CXX_STANDARD 11)
|
||||
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
|
||||
|
||||
# bloomFilterTest
|
||||
ADD_EXECUTABLE(streamUpdateTest "tstreamUpdateTest.cpp")
|
||||
|
||||
TARGET_LINK_LIBRARIES(streamUpdateTest
|
||||
PUBLIC os util common gtest gtest_main stream executor index
|
||||
#TARGET_LINK_LIBRARIES(streamUpdateTest
|
||||
#PUBLIC os util common gtest gtest_main stream executor index
|
||||
#)
|
||||
|
||||
#TARGET_INCLUDE_DIRECTORIES(
|
||||
#streamUpdateTest
|
||||
#PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/"
|
||||
#PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc"
|
||||
#)
|
||||
|
||||
#ADD_EXECUTABLE(checkpointTest checkpointTest.cpp)
|
||||
#TARGET_LINK_LIBRARIES(
|
||||
#checkpointTest
|
||||
#PUBLIC os common gtest stream executor qcom index transport util
|
||||
#)
|
||||
|
||||
#TARGET_INCLUDE_DIRECTORIES(
|
||||
#checkpointTest
|
||||
#PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc"
|
||||
#)
|
||||
|
||||
#add_executable(backendTest "")
|
||||
|
||||
#target_sources(backendTest
|
||||
#PRIVATE
|
||||
#"backendTest.cpp"
|
||||
#)
|
||||
|
||||
#TARGET_LINK_LIBRARIES(
|
||||
#backendTest
|
||||
#PUBLIC rocksdb
|
||||
#PUBLIC os common gtest stream executor qcom index transport util
|
||||
#)
|
||||
|
||||
#TARGET_INCLUDE_DIRECTORIES(
|
||||
#backendTest
|
||||
#PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/"
|
||||
#PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc"
|
||||
#)
|
||||
|
||||
#add_test(
|
||||
#NAME streamUpdateTest
|
||||
#COMMAND streamUpdateTest
|
||||
#)
|
||||
|
||||
#add_test(
|
||||
#NAME checkpointTest
|
||||
#COMMAND checkpointTest
|
||||
#)
|
||||
#add_test(
|
||||
#NAME backendTest
|
||||
#COMMAND backendTest
|
||||
#)
|
||||
|
||||
|
||||
#add_executable(backendTest "")
|
||||
|
||||
#target_sources(backendTest
|
||||
#PUBLIC
|
||||
#"backendTest.cpp"
|
||||
#)
|
||||
|
||||
#target_include_directories(
|
||||
#backendTest
|
||||
#PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/"
|
||||
#PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc"
|
||||
#)
|
||||
|
||||
#target_link_libraries(
|
||||
#backendTest
|
||||
#PUBLIC rocksdb
|
||||
#PUBLIC os common gtest stream executor qcom index transport util
|
||||
#)
|
||||
|
||||
|
||||
MESSAGE(STATUS "build parser unit test")
|
||||
|
||||
IF(NOT TD_DARWIN)
|
||||
# GoogleTest requires at least C++11
|
||||
SET(CMAKE_CXX_STANDARD 11)
|
||||
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
|
||||
|
||||
ADD_EXECUTABLE(backendTest ${SOURCE_LIST})
|
||||
TARGET_LINK_LIBRARIES(
|
||||
backendTest
|
||||
PUBLIC rocksdb
|
||||
PUBLIC os common gtest stream executor qcom index transport util vnode
|
||||
)
|
||||
|
||||
TARGET_INCLUDE_DIRECTORIES(
|
||||
streamUpdateTest
|
||||
PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/"
|
||||
PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc"
|
||||
)
|
||||
TARGET_INCLUDE_DIRECTORIES(
|
||||
backendTest
|
||||
PUBLIC "${TD_SOURCE_DIR}/include/libs/stream/"
|
||||
PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc"
|
||||
)
|
||||
|
||||
ADD_EXECUTABLE(checkpointTest checkpointTest.cpp)
|
||||
TARGET_LINK_LIBRARIES(
|
||||
checkpointTest
|
||||
PUBLIC os common gtest stream executor qcom index transport util
|
||||
)
|
||||
|
||||
TARGET_INCLUDE_DIRECTORIES(
|
||||
checkpointTest
|
||||
PRIVATE "${TD_SOURCE_DIR}/source/libs/stream/inc"
|
||||
)
|
||||
|
||||
add_test(
|
||||
NAME streamUpdateTest
|
||||
COMMAND streamUpdateTest
|
||||
)
|
||||
|
||||
add_test(
|
||||
NAME checkpointTest
|
||||
COMMAND checkpointTest
|
||||
)
|
||||
ADD_TEST(
|
||||
NAME backendTest
|
||||
COMMAND backendTest
|
||||
)
|
||||
ENDIF ()
|
|
@ -0,0 +1,461 @@
|
|||
#include <gtest/gtest.h>
|
||||
|
||||
#include <taoserror.h>
|
||||
#include <tglobal.h>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include "streamBackendRocksdb.h"
|
||||
#include "streamSnapshot.h"
|
||||
#include "streamState.h"
|
||||
#include "tstream.h"
|
||||
#include "tstreamFileState.h"
|
||||
#include "tstreamUpdate.h"
|
||||
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wwrite-strings"
|
||||
#pragma GCC diagnostic ignored "-Wunused-function"
|
||||
#pragma GCC diagnostic ignored "-Wunused-variable"
|
||||
#pragma GCC diagnostic ignored "-Wsign-compare"
|
||||
#pragma GCC diagnostic ignored "-Wsign-compare"
|
||||
#pragma GCC diagnostic ignored "-Wformat"
|
||||
#pragma GCC diagnostic ignored "-Wint-to-pointer-cast"
|
||||
#pragma GCC diagnostic ignored "-Wpointer-arith"
|
||||
|
||||
class BackendEnv : public ::testing::Test {
|
||||
protected:
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
};
|
||||
|
||||
void *backendCreate() {
|
||||
const char *streamPath = "/tmp";
|
||||
void * p = NULL;
|
||||
|
||||
// char *absPath = NULL;
|
||||
// // SBackendWrapper *p = (SBackendWrapper *)streamBackendInit(streamPath, -1, 2);
|
||||
// STaskDbWrapper *p = taskDbOpen((char *)streamPath, (char *)"stream-backend", -1);
|
||||
// ASSERT(p != NULL);
|
||||
return p;
|
||||
}
|
||||
|
||||
SStreamState *stateCreate(const char *path) {
|
||||
SStreamTask *pTask = (SStreamTask *)taosMemoryCalloc(1, sizeof(SStreamTask));
|
||||
pTask->ver = 1024;
|
||||
pTask->id.streamId = 1023;
|
||||
pTask->id.taskId = 1111111;
|
||||
SStreamMeta *pMeta = streamMetaOpen((path), NULL, NULL, 0, 0, NULL);
|
||||
pTask->pMeta = pMeta;
|
||||
|
||||
SStreamState *p = streamStateOpen((char *)path, pTask, true, 32, 32 * 1024);
|
||||
ASSERT(p != NULL);
|
||||
return p;
|
||||
}
|
||||
void *backendOpen() {
|
||||
streamMetaInit();
|
||||
const char * path = "/tmp/backend";
|
||||
SStreamState *p = stateCreate(path);
|
||||
ASSERT(p != NULL);
|
||||
|
||||
// write batch
|
||||
// default/state/fill/sess/func/parname/partag
|
||||
int32_t size = 100;
|
||||
std::vector<int64_t> tsArray;
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
int64_t ts = taosGetTimestampMs();
|
||||
SWinKey key; // = {.groupId = (uint64_t)(i), .ts = ts};
|
||||
key.groupId = (uint64_t)(i);
|
||||
key.ts = ts;
|
||||
const char *val = "value data";
|
||||
int32_t vlen = strlen(val);
|
||||
streamStatePut_rocksdb(p, &key, (char *)val, vlen);
|
||||
|
||||
tsArray.push_back(ts);
|
||||
}
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
int64_t ts = tsArray[i];
|
||||
SWinKey key = {0}; //{.groupId = (uint64_t)(i), .ts = ts};
|
||||
key.groupId = (uint64_t)(i);
|
||||
key.ts = ts;
|
||||
|
||||
const char *val = "value data";
|
||||
int32_t len = 0;
|
||||
char * newVal = NULL;
|
||||
streamStateGet_rocksdb(p, &key, (void **)&newVal, &len);
|
||||
ASSERT(len == strlen(val));
|
||||
}
|
||||
int64_t ts = tsArray[0];
|
||||
SWinKey key = {0}; // {.groupId = (uint64_t)(0), .ts = ts};
|
||||
key.groupId = (uint64_t)(0);
|
||||
key.ts = ts;
|
||||
|
||||
streamStateDel_rocksdb(p, &key);
|
||||
|
||||
streamStateClear_rocksdb(p);
|
||||
|
||||
for (int i = 0; i < size; i++) {
|
||||
int64_t ts = tsArray[i];
|
||||
SWinKey key = {0}; //{.groupId = (uint64_t)(i), .ts = ts};
|
||||
key.groupId = (uint64_t)(i);
|
||||
key.ts = ts;
|
||||
|
||||
const char *val = "value data";
|
||||
int32_t len = 0;
|
||||
char * newVal = NULL;
|
||||
int32_t code = streamStateGet_rocksdb(p, &key, (void **)&newVal, &len);
|
||||
ASSERT(code != 0);
|
||||
}
|
||||
tsArray.clear();
|
||||
|
||||
for (int i = 0; i < size; i++) {
|
||||
int64_t ts = taosGetTimestampMs();
|
||||
tsArray.push_back(ts);
|
||||
|
||||
SWinKey key = {0}; //{.groupId = (uint64_t)(i), .ts = ts};
|
||||
key.groupId = (uint64_t)(i);
|
||||
key.ts = ts;
|
||||
|
||||
const char *val = "value data";
|
||||
int32_t vlen = strlen(val);
|
||||
streamStatePut_rocksdb(p, &key, (char *)val, vlen);
|
||||
}
|
||||
|
||||
SWinKey winkey;
|
||||
int32_t code = streamStateGetFirst_rocksdb(p, &key);
|
||||
ASSERT(code == 0);
|
||||
ASSERT(key.ts == tsArray[0]);
|
||||
|
||||
SStreamStateCur *pCurr = streamStateSeekToLast_rocksdb(p);
|
||||
ASSERT(pCurr != NULL);
|
||||
streamStateFreeCur(pCurr);
|
||||
|
||||
winkey.groupId = 0;
|
||||
winkey.ts = tsArray[0];
|
||||
char * val = NULL;
|
||||
int32_t len = 0;
|
||||
|
||||
pCurr = streamStateSeekKeyNext_rocksdb(p, &winkey);
|
||||
ASSERT(pCurr != NULL);
|
||||
|
||||
streamStateFreeCur(pCurr);
|
||||
|
||||
tsArray.clear();
|
||||
for (int i = 0; i < size; i++) {
|
||||
int64_t ts = taosGetTimestampMs();
|
||||
tsArray.push_back(ts);
|
||||
STupleKey key = {0};
|
||||
key.groupId = (uint64_t)(0); //= {.groupId = (uint64_t)(0), .ts = ts, .exprIdx = i};
|
||||
key.ts = ts;
|
||||
key.exprIdx = i;
|
||||
|
||||
const char *val = "Value";
|
||||
int32_t len = strlen(val);
|
||||
streamStateFuncPut_rocksdb(p, &key, val, len);
|
||||
}
|
||||
for (int i = 0; i < size; i++) {
|
||||
STupleKey key = {0}; //{.groupId = (uint64_t)(0), .ts = tsArray[i], .exprIdx = i};
|
||||
key.groupId = (uint64_t)(0);
|
||||
key.ts = tsArray[i];
|
||||
key.exprIdx = i;
|
||||
|
||||
char * val = NULL;
|
||||
int32_t len = 0;
|
||||
streamStateFuncGet_rocksdb(p, &key, (void **)&val, &len);
|
||||
ASSERT(len == strlen("Value"));
|
||||
}
|
||||
for (int i = 0; i < size; i++) {
|
||||
STupleKey key = {0}; //{.groupId = (uint64_t)(0), .ts = tsArray[i], .exprIdx = i};
|
||||
key.groupId = (uint64_t)(0);
|
||||
key.ts = tsArray[i];
|
||||
key.exprIdx = i;
|
||||
|
||||
char * val = NULL;
|
||||
int32_t len = 0;
|
||||
streamStateFuncDel_rocksdb(p, &key);
|
||||
}
|
||||
|
||||
// session put
|
||||
tsArray.clear();
|
||||
|
||||
for (int i = 0; i < size; i++) {
|
||||
SSessionKey key = {0}; //{.win = {.skey = i, .ekey = i}, .groupId = (uint64_t)(0)};
|
||||
key.win.skey = i;
|
||||
key.win.ekey = i;
|
||||
key.groupId = (uint64_t)(0);
|
||||
tsArray.push_back(i);
|
||||
|
||||
const char *val = "Value";
|
||||
int32_t len = strlen(val);
|
||||
streamStateSessionPut_rocksdb(p, &key, val, len);
|
||||
|
||||
char *pval = NULL;
|
||||
ASSERT(0 == streamStateSessionGet_rocksdb(p, &key, (void **)&pval, &len));
|
||||
ASSERT(strncmp(pval, val, len) == 0);
|
||||
}
|
||||
|
||||
for (int i = 0; i < size; i++) {
|
||||
SSessionKey key = {0}; //{.win = {.skey = tsArray[i], .ekey = tsArray[i]}, .groupId = (uint64_t)(0)};
|
||||
key.win.skey = tsArray[i];
|
||||
key.win.ekey = tsArray[i];
|
||||
key.groupId = (uint64_t)(0);
|
||||
|
||||
const char *val = "Value";
|
||||
int32_t len = strlen(val);
|
||||
|
||||
char *pval = NULL;
|
||||
ASSERT(0 == streamStateSessionGet_rocksdb(p, &key, (void **)&pval, &len));
|
||||
ASSERT(strncmp(pval, val, len) == 0);
|
||||
taosMemoryFreeClear(pval);
|
||||
}
|
||||
|
||||
pCurr = streamStateSessionSeekToLast_rocksdb(p, 0);
|
||||
ASSERT(pCurr != NULL);
|
||||
|
||||
{
|
||||
SSessionKey key;
|
||||
memset(&key, 0, sizeof(key));
|
||||
char * val = NULL;
|
||||
int32_t vlen = 0;
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCurr, &key, (void **)&val, &vlen);
|
||||
ASSERT(code == 0);
|
||||
pCurr = streamStateSessionSeekKeyPrev_rocksdb(p, &key);
|
||||
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCurr, &key, (void **)&val, &vlen);
|
||||
ASSERT(code == 0);
|
||||
|
||||
ASSERT(key.groupId == 0 && key.win.ekey == tsArray[tsArray.size() - 2]);
|
||||
|
||||
pCurr = streamStateSessionSeekKeyNext_rocksdb(p, &key);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCurr, &key, (void **)&val, &vlen);
|
||||
ASSERT(code == 0);
|
||||
ASSERT(vlen == strlen("Value"));
|
||||
ASSERT(key.groupId == 0 && key.win.skey == tsArray[tsArray.size() - 1]);
|
||||
|
||||
ASSERT(0 == streamStateSessionAddIfNotExist_rocksdb(p, &key, 10, (void **)&val, &len));
|
||||
|
||||
ASSERT(0 ==
|
||||
streamStateStateAddIfNotExist_rocksdb(p, &key, (char *)"key", strlen("key"), NULL, (void **)&val, &len));
|
||||
}
|
||||
for (int i = 0; i < size; i++) {
|
||||
SSessionKey key = {0}; //{.win = {.skey = tsArray[i], .ekey = tsArray[i]}, .groupId = (uint64_t)(0)};
|
||||
key.win.skey = tsArray[i];
|
||||
key.win.ekey = tsArray[i];
|
||||
key.groupId = (uint64_t)(0);
|
||||
|
||||
const char *val = "Value";
|
||||
int32_t len = strlen(val);
|
||||
|
||||
char *pval = NULL;
|
||||
ASSERT(0 == streamStateSessionDel_rocksdb(p, &key));
|
||||
}
|
||||
|
||||
for (int i = 0; i < size; i++) {
|
||||
SWinKey key = {0}; // {.groupId = (uint64_t)(i), .ts = tsArray[i]};
|
||||
key.groupId = (uint64_t)(i);
|
||||
key.ts = tsArray[i];
|
||||
const char *val = "Value";
|
||||
int32_t vlen = strlen(val);
|
||||
ASSERT(streamStateFillPut_rocksdb(p, &key, val, vlen) == 0);
|
||||
}
|
||||
for (int i = 0; i < size; i++) {
|
||||
SWinKey key = {0}; // {.groupId = (uint64_t)(i), .ts = tsArray[i]};
|
||||
key.groupId = (uint64_t)(i);
|
||||
key.ts = tsArray[i];
|
||||
char * val = NULL;
|
||||
int32_t vlen = 0;
|
||||
ASSERT(streamStateFillGet_rocksdb(p, &key, (void **)&val, &vlen) == 0);
|
||||
taosMemoryFreeClear(val);
|
||||
}
|
||||
{
|
||||
SWinKey key = {0}; //{.groupId = (uint64_t)(0), .ts = tsArray[0]};
|
||||
key.groupId = (uint64_t)(0);
|
||||
key.ts = tsArray[0];
|
||||
SStreamStateCur *pCurr = streamStateFillGetCur_rocksdb(p, &key);
|
||||
ASSERT(pCurr != NULL);
|
||||
|
||||
char * val = NULL;
|
||||
int32_t vlen = 0;
|
||||
ASSERT(0 == streamStateFillGetKVByCur_rocksdb(pCurr, &key, (const void **)&val, &vlen));
|
||||
ASSERT(vlen == strlen("Value"));
|
||||
streamStateFreeCur(pCurr);
|
||||
|
||||
pCurr = streamStateFillSeekKeyNext_rocksdb(p, &key);
|
||||
ASSERT(0 == streamStateFillGetKVByCur_rocksdb(pCurr, &key, (const void **)&val, &vlen));
|
||||
ASSERT(vlen == strlen("Value") && key.groupId == 1 && key.ts == tsArray[1]);
|
||||
|
||||
key.groupId = 1;
|
||||
key.ts = tsArray[1];
|
||||
|
||||
pCurr = streamStateFillSeekKeyPrev_rocksdb(p, &key);
|
||||
ASSERT(pCurr != NULL);
|
||||
ASSERT(0 == streamStateFillGetKVByCur_rocksdb(pCurr, &key, (const void **)&val, &vlen));
|
||||
|
||||
ASSERT(vlen == strlen("Value") && key.groupId == 0 && key.ts == tsArray[0]);
|
||||
}
|
||||
|
||||
for (int i = 0; i < size - 1; i++) {
|
||||
SWinKey key = {0}; // {.groupId = (uint64_t)(i), .ts = tsArray[i]};
|
||||
key.groupId = (uint64_t)(i);
|
||||
key.ts = tsArray[i];
|
||||
char * val = NULL;
|
||||
int32_t vlen = 0;
|
||||
ASSERT(streamStateFillDel_rocksdb(p, &key) == 0);
|
||||
taosMemoryFreeClear(val);
|
||||
}
|
||||
streamStateSessionClear_rocksdb(p);
|
||||
|
||||
for (int i = 0; i < size; i++) {
|
||||
char tbname[TSDB_TABLE_NAME_LEN] = {0};
|
||||
sprintf(tbname, "%s_%d", "tbname", i);
|
||||
ASSERT(0 == streamStatePutParName_rocksdb(p, i, tbname));
|
||||
}
|
||||
for (int i = 0; i < size; i++) {
|
||||
char *val = NULL;
|
||||
ASSERT(0 == streamStateGetParName_rocksdb(p, i, (void **)&val));
|
||||
ASSERT(strncmp(val, "tbname", strlen("tbname")) == 0);
|
||||
taosMemoryFree(val);
|
||||
}
|
||||
|
||||
for (int i = 0; i < size; i++) {
|
||||
char tbname[TSDB_TABLE_NAME_LEN] = {0};
|
||||
sprintf(tbname, "%s_%d", "tbname", i);
|
||||
ASSERT(0 == streamStatePutParName_rocksdb(p, i, tbname));
|
||||
}
|
||||
for (int i = 0; i < size; i++) {
|
||||
char *val = NULL;
|
||||
ASSERT(0 == streamStateGetParName_rocksdb(p, i, (void **)&val));
|
||||
ASSERT(strncmp(val, "tbname", strlen("tbname")) == 0);
|
||||
taosMemoryFree(val);
|
||||
}
|
||||
for (int i = 0; i < size; i++) {
|
||||
char key[128] = {0};
|
||||
sprintf(key, "tbname_%d", i);
|
||||
char val[128] = {0};
|
||||
sprintf(val, "val_%d", i);
|
||||
code = streamDefaultPut_rocksdb(p, key, val, strlen(val));
|
||||
ASSERT(code == 0);
|
||||
}
|
||||
for (int i = 0; i < size; i++) {
|
||||
char key[128] = {0};
|
||||
sprintf(key, "tbname_%d", i);
|
||||
|
||||
char * val = NULL;
|
||||
int32_t len = 0;
|
||||
code = streamDefaultGet_rocksdb(p, key, (void **)&val, &len);
|
||||
ASSERT(code == 0);
|
||||
}
|
||||
SArray *result = taosArrayInit(8, sizeof(void *));
|
||||
streamDefaultIterGet_rocksdb(p, "tbname", "tbname_99", result);
|
||||
ASSERT(taosArrayGetSize(result) >= 0);
|
||||
|
||||
return p;
|
||||
// streamStateClose((SStreamState *)p, true);
|
||||
}
|
||||
TEST_F(BackendEnv, checkOpen) {
|
||||
SStreamState *p = (SStreamState *)backendOpen();
|
||||
int64_t tsStart = taosGetTimestampMs();
|
||||
{
|
||||
void * pBatch = streamStateCreateBatch();
|
||||
int32_t size = 0;
|
||||
for (int i = 0; i < size; i++) {
|
||||
char key[128] = {0};
|
||||
sprintf(key, "key_%d", i);
|
||||
char val[128] = {0};
|
||||
sprintf(val, "val_%d", i);
|
||||
streamStatePutBatch(p, "default", (rocksdb_writebatch_t *)pBatch, (void *)key, (void *)val,
|
||||
(int32_t)(strlen(val)), tsStart + 100000);
|
||||
}
|
||||
streamStatePutBatch_rocksdb(p, pBatch);
|
||||
streamStateDestroyBatch(pBatch);
|
||||
}
|
||||
{
|
||||
void * pBatch = streamStateCreateBatch();
|
||||
int32_t size = 0;
|
||||
char valBuf[256] = {0};
|
||||
for (int i = 0; i < size; i++) {
|
||||
char key[128] = {0};
|
||||
sprintf(key, "key_%d", i);
|
||||
char val[128] = {0};
|
||||
sprintf(val, "val_%d", i);
|
||||
streamStatePutBatchOptimize(p, 0, (rocksdb_writebatch_t *)pBatch, (void *)key, (void *)val,
|
||||
(int32_t)(strlen(val)), tsStart + 100000, (void *)valBuf);
|
||||
}
|
||||
streamStatePutBatch_rocksdb(p, pBatch);
|
||||
streamStateDestroyBatch(pBatch);
|
||||
}
|
||||
// do checkpoint 2
|
||||
taskDbDoCheckpoint(p->pTdbState->pOwner->pBackend, 2);
|
||||
{
|
||||
void * pBatch = streamStateCreateBatch();
|
||||
int32_t size = 0;
|
||||
char valBuf[256] = {0};
|
||||
for (int i = 0; i < size; i++) {
|
||||
char key[128] = {0};
|
||||
sprintf(key, "key_%d", i);
|
||||
char val[128] = {0};
|
||||
sprintf(val, "val_%d", i);
|
||||
streamStatePutBatchOptimize(p, 0, (rocksdb_writebatch_t *)pBatch, (void *)key, (void *)val,
|
||||
(int32_t)(strlen(val)), tsStart + 100000, (void *)valBuf);
|
||||
}
|
||||
streamStatePutBatch_rocksdb(p, pBatch);
|
||||
streamStateDestroyBatch(pBatch);
|
||||
}
|
||||
|
||||
taskDbDoCheckpoint(p->pTdbState->pOwner->pBackend, 3);
|
||||
|
||||
const char *path = "/tmp/backend/stream";
|
||||
const char *dump = "/tmp/backend/stream/dump";
|
||||
// taosMkDir(dump);
|
||||
taosMulMkDir(dump);
|
||||
SBkdMgt *mgt = bkdMgtCreate((char *)path);
|
||||
SArray * result = taosArrayInit(4, sizeof(void *));
|
||||
bkdMgtGetDelta(mgt, p->pTdbState->idstr, 3, result, (char *)dump);
|
||||
|
||||
taskDbDoCheckpoint(p->pTdbState->pOwner->pBackend, 4);
|
||||
|
||||
taosArrayClear(result);
|
||||
bkdMgtGetDelta(mgt, p->pTdbState->idstr, 4, result, (char *)dump);
|
||||
bkdMgtDestroy(mgt);
|
||||
streamStateClose((SStreamState *)p, true);
|
||||
// {
|
||||
// taosRemoveDir("/tmp/backend");
|
||||
// const char * path = "/tmp/backend";
|
||||
// SStreamState *p = stateCreate(path);
|
||||
// }
|
||||
taosRemoveDir(path);
|
||||
// streamStateClose((SStreamState *)p, true);
|
||||
}
|
||||
|
||||
TEST_F(BackendEnv, backendChkp) { const char *path = "/tmp"; }
|
||||
|
||||
typedef struct BdKV {
|
||||
uint32_t k;
|
||||
uint32_t v;
|
||||
} BdKV;
|
||||
|
||||
BdKV kvDict[] = {{0, 2}, {1, 2}, {15, 16}, {31, 32}, {56, 64}, {100, 128},
|
||||
{200, 256}, {500, 512}, {1000, 1024}, {2000, 2048}, {3000, 4096}};
|
||||
|
||||
TEST_F(BackendEnv, backendUtil) {
|
||||
for (int i = 0; i < sizeof(kvDict) / sizeof(kvDict[0]); i++) {
|
||||
ASSERT_EQ(nextPow2((uint32_t)(kvDict[i].k)), kvDict[i].v);
|
||||
}
|
||||
}
|
||||
TEST_F(BackendEnv, oldBackendInit) {
|
||||
const char *path = "/tmp/backend1";
|
||||
taosMulMkDir(path);
|
||||
{
|
||||
SBackendWrapper *p = (SBackendWrapper *)streamBackendInit(path, 10, 10);
|
||||
streamBackendCleanup((void *)p);
|
||||
}
|
||||
{
|
||||
SBackendWrapper *p = (SBackendWrapper *)streamBackendInit(path, 10, 10);
|
||||
streamBackendCleanup((void *)p);
|
||||
}
|
||||
|
||||
taosRemoveDir(path);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
|
@ -25,46 +25,49 @@
|
|||
#pragma GCC diagnostic ignored "-Wunused-variable"
|
||||
#pragma GCC diagnostic ignored "-Wsign-compare"
|
||||
|
||||
// tsSnodeAddress = "";
|
||||
// tsS3StreamEnabled = 0;
|
||||
|
||||
#include "cos.h"
|
||||
#include "rsync.h"
|
||||
#include "streamInt.h"
|
||||
#include "cos.h"
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
// int main(int argc, char **argv) {
|
||||
// testing::InitGoogleTest(&argc, argv);
|
||||
|
||||
if (taosInitCfg("/etc/taos/", NULL, NULL, NULL, NULL, 0) != 0) {
|
||||
printf("error");
|
||||
}
|
||||
if (s3Init() < 0) {
|
||||
return -1;
|
||||
}
|
||||
strcpy(tsSnodeAddress, "127.0.0.1");
|
||||
int ret = RUN_ALL_TESTS();
|
||||
s3CleanUp();
|
||||
return ret;
|
||||
}
|
||||
// if (taosInitCfg("/etc/taos/", NULL, NULL, NULL, NULL, 0) != 0) {
|
||||
// printf("error");
|
||||
// }
|
||||
// if (s3Init() < 0) {
|
||||
// return -1;
|
||||
// }
|
||||
// strcpy(tsSnodeAddress, "127.0.0.1");
|
||||
// int ret = RUN_ALL_TESTS();
|
||||
// s3CleanUp();
|
||||
// return ret;
|
||||
// }
|
||||
|
||||
TEST(testCase, checkpointUpload_Test) {
|
||||
stopRsync();
|
||||
startRsync();
|
||||
// stopRsync();
|
||||
// startRsync();
|
||||
|
||||
taosSsleep(5);
|
||||
char* id = "2013892036";
|
||||
|
||||
uploadCheckpoint(id, "/root/offset/");
|
||||
// uploadCheckpoint(id, "/root/offset/");
|
||||
}
|
||||
|
||||
TEST(testCase, checkpointDownload_Test) {
|
||||
char* id = "2013892036";
|
||||
downloadCheckpoint(id, "/root/offset/download/");
|
||||
// downloadCheckpoint(id, "/root/offset/download/");
|
||||
}
|
||||
|
||||
TEST(testCase, checkpointDelete_Test) {
|
||||
char* id = "2013892036";
|
||||
deleteCheckpoint(id);
|
||||
// deleteCheckpoint(id);
|
||||
}
|
||||
|
||||
TEST(testCase, checkpointDeleteFile_Test) {
|
||||
char* id = "2013892036";
|
||||
deleteCheckpointFile(id, "offset-ver0");
|
||||
// deleteCheckpointFile(id, "offset-ver0");
|
||||
}
|
||||
|
|
|
@ -14,10 +14,7 @@ class StreamStateEnv : public ::testing::Test {
|
|||
streamMetaInit();
|
||||
backend = streamBackendInit(path, 0, 0);
|
||||
}
|
||||
virtual void TearDown() {
|
||||
streamMetaCleanup();
|
||||
// indexClose(index);
|
||||
}
|
||||
virtual void TearDown() { streamMetaCleanup(); }
|
||||
|
||||
const char *path = TD_TMP_DIR_PATH "stream";
|
||||
void *backend;
|
||||
|
@ -50,6 +47,14 @@ bool equalSBF(SScalableBf *left, SScalableBf *right) {
|
|||
}
|
||||
|
||||
TEST(TD_STREAM_UPDATE_TEST, update) {
|
||||
const char *streamPath = "/tmp";
|
||||
|
||||
char *absPath = NULL;
|
||||
void *p = NULL;
|
||||
// SBackendWrapper *p = streamBackendInit(streamPath, -1, 2);
|
||||
// p = taskDbOpen((char *)streamPath, (char *)"test", -1);
|
||||
p = bkdMgtCreate((char *)streamPath);
|
||||
|
||||
// const int64_t interval = 20 * 1000;
|
||||
// const int64_t watermark = 10 * 60 * 1000;
|
||||
// SUpdateInfo *pSU = updateInfoInit(interval, TSDB_TIME_PRECISION_MILLI, watermark);
|
||||
|
|
|
@ -1343,7 +1343,7 @@ ESyncStrategy syncNodeStrategy(SSyncNode* pSyncNode) { return pSyncNode->raftCfg
|
|||
int32_t syncNodeStartPingTimer(SSyncNode* pSyncNode) {
|
||||
int32_t ret = 0;
|
||||
if (syncIsInit()) {
|
||||
taosTmrReset(pSyncNode->FpPingTimerCB, pSyncNode->pingTimerMS, pSyncNode, syncEnv()->pTimerManager,
|
||||
taosTmrReset(pSyncNode->FpPingTimerCB, pSyncNode->pingTimerMS, (void*)pSyncNode->rid, syncEnv()->pTimerManager,
|
||||
&pSyncNode->pPingTimer);
|
||||
atomic_store_64(&pSyncNode->pingTimerLogicClock, pSyncNode->pingTimerLogicClockUser);
|
||||
} else {
|
||||
|
@ -1415,8 +1415,8 @@ void syncNodeResetElectTimer(SSyncNode* pSyncNode) {
|
|||
static int32_t syncNodeDoStartHeartbeatTimer(SSyncNode* pSyncNode) {
|
||||
int32_t ret = 0;
|
||||
if (syncIsInit()) {
|
||||
taosTmrReset(pSyncNode->FpHeartbeatTimerCB, pSyncNode->heartbeatTimerMS, pSyncNode, syncEnv()->pTimerManager,
|
||||
&pSyncNode->pHeartbeatTimer);
|
||||
taosTmrReset(pSyncNode->FpHeartbeatTimerCB, pSyncNode->heartbeatTimerMS, (void*)pSyncNode->rid,
|
||||
syncEnv()->pTimerManager, &pSyncNode->pHeartbeatTimer);
|
||||
atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser);
|
||||
} else {
|
||||
sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId);
|
||||
|
@@ -2153,7 +2153,11 @@ int32_t syncNodeGetPreIndexTerm(SSyncNode* pSyncNode, SyncIndex index, SyncIndex
static void syncNodeEqPingTimer(void* param, void* tmrId) {
  if (!syncIsInit()) return;

  SSyncNode* pNode = param;
  int64_t    rid = (int64_t)param;
  SSyncNode* pNode = syncNodeAcquire(rid);

  if (pNode == NULL) return;

  if (atomic_load_64(&pNode->pingTimerLogicClockUser) <= atomic_load_64(&pNode->pingTimerLogicClock)) {
    SRpcMsg rpcMsg = {0};
    int32_t code = syncBuildTimeout(&rpcMsg, SYNC_TIMEOUT_PING, atomic_load_64(&pNode->pingTimerLogicClock),
@@ -2173,7 +2177,8 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) {
    }

  _out:
    taosTmrReset(syncNodeEqPingTimer, pNode->pingTimerMS, pNode, syncEnv()->pTimerManager, &pNode->pPingTimer);
    taosTmrReset(syncNodeEqPingTimer, pNode->pingTimerMS, (void*)pNode->rid, syncEnv()->pTimerManager,
                 &pNode->pPingTimer);
  }
}
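The hunks above switch the ping timer from passing the raw `SSyncNode*` to passing its reference id, so the callback must re-acquire the node and bail out if it is already gone. A stripped-down illustration of that acquire-by-id pattern (the registry below is a toy stand-in for the real reference-counted sync node store):

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t rid; int pingTimerMS; } Node;

/* Toy registry: the real code resolves rids through a reference-counted store. */
static Node gNodes[4];
static Node *nodeAcquire(int64_t rid) {
  return (rid >= 0 && rid < 4) ? &gNodes[rid] : NULL;  /* NULL if already released */
}
static void nodeRelease(Node *n) { (void)n; }

/* The timer callback receives the rid (as void*) instead of a raw pointer, so a
 * node freed between ticks is simply skipped rather than dereferenced. */
static void onPingTimer(void *param, void *tmrId) {
  (void)tmrId;
  int64_t rid = (int64_t)param;
  Node   *node = nodeAcquire(rid);
  if (node == NULL) return;          /* node went away; do nothing */
  printf("ping node %lld every %d ms\n", (long long)node->rid, node->pingTimerMS);
  nodeRelease(node);
  /* re-arm with the rid again, mirroring taosTmrReset(..., (void*)pNode->rid, ...) */
}

int main(void) {
  gNodes[1] = (Node){.rid = 1, .pingTimerMS = 5000};
  onPingTimer((void *)(int64_t)1, NULL);
  onPingTimer((void *)(int64_t)3, NULL);  /* stale rid: safely ignored */
  return 0;
}
```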
@ -2224,7 +2229,11 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) {
|
|||
static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
|
||||
if (!syncIsInit()) return;
|
||||
|
||||
SSyncNode* pNode = param;
|
||||
int64_t rid = (int64_t)param;
|
||||
SSyncNode* pNode = syncNodeAcquire(rid);
|
||||
|
||||
if (pNode == NULL) return;
|
||||
|
||||
if (pNode->totalReplicaNum > 1) {
|
||||
if (atomic_load_64(&pNode->heartbeatTimerLogicClockUser) <= atomic_load_64(&pNode->heartbeatTimerLogicClock)) {
|
||||
SRpcMsg rpcMsg = {0};
|
||||
|
@ -2245,7 +2254,7 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
|
|||
}
|
||||
|
||||
_out:
|
||||
taosTmrReset(syncNodeEqHeartbeatTimer, pNode->heartbeatTimerMS, pNode, syncEnv()->pTimerManager,
|
||||
taosTmrReset(syncNodeEqHeartbeatTimer, pNode->heartbeatTimerMS, (void*)pNode->rid, syncEnv()->pTimerManager,
|
||||
&pNode->pHeartbeatTimer);
|
||||
|
||||
} else {
|
||||
|
@ -3385,4 +3394,4 @@ bool syncNodeCanChange(SSyncNode* pSyncNode) {
|
|||
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -316,7 +316,7 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn)
|
|||
}
|
||||
|
||||
// 3. Try to Recycle a page
|
||||
if (!pPage && !pCache->lru.pLruPrev->isAnchor) {
|
||||
if (!pPageH && !pPage && !pCache->lru.pLruPrev->isAnchor) {
|
||||
pPage = pCache->lru.pLruPrev;
|
||||
tdbPCacheRemovePageFromHash(pCache, pPage);
|
||||
tdbPCachePinPage(pCache, pPage);
|
||||
|
|
|
@ -256,21 +256,21 @@ void transAsyncPoolDestroy(SAsyncPool* pool);
|
|||
int transAsyncSend(SAsyncPool* pool, queue* mq);
|
||||
bool transAsyncPoolIsEmpty(SAsyncPool* pool);
|
||||
|
||||
#define TRANS_DESTROY_ASYNC_POOL_MSG(pool, msgType, freeFunc) \
|
||||
do { \
|
||||
for (int i = 0; i < pool->nAsync; i++) { \
|
||||
uv_async_t* async = &(pool->asyncs[i]); \
|
||||
SAsyncItem* item = async->data; \
|
||||
while (!QUEUE_IS_EMPTY(&item->qmsg)) { \
|
||||
tTrace("destroy msg in async pool "); \
|
||||
queue* h = QUEUE_HEAD(&item->qmsg); \
|
||||
QUEUE_REMOVE(h); \
|
||||
msgType* msg = QUEUE_DATA(h, msgType, q); \
|
||||
if (msg != NULL) { \
|
||||
freeFunc(msg); \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
#define TRANS_DESTROY_ASYNC_POOL_MSG(pool, msgType, freeFunc, param) \
|
||||
do { \
|
||||
for (int i = 0; i < pool->nAsync; i++) { \
|
||||
uv_async_t* async = &(pool->asyncs[i]); \
|
||||
SAsyncItem* item = async->data; \
|
||||
while (!QUEUE_IS_EMPTY(&item->qmsg)) { \
|
||||
tTrace("destroy msg in async pool "); \
|
||||
queue* h = QUEUE_HEAD(&item->qmsg); \
|
||||
QUEUE_REMOVE(h); \
|
||||
msgType* msg = QUEUE_DATA(h, msgType, q); \
|
||||
if (msg != NULL) { \
|
||||
freeFunc(msg, param); \
|
||||
} \
|
||||
} \
|
||||
} \
|
||||
} while (0)
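With the extra `param` argument threaded through the macro above, every free callback now has the shape `(msg, param)`; the wrappers added elsewhere in this diff either use the context or ignore it. A minimal standalone sketch of such a wrapper (the message type and names below are placeholders, not the transport module's):

```c
#include <stdio.h>
#include <stdlib.h>

/* Placeholder message type standing in for SCliMsg / SSvrMsg / SHttpMsg. */
typedef struct { char *payload; } DemoMsg;

/* Old-style destructor: one argument. */
static void demoDestroyMsg(DemoMsg *msg) {
  if (msg == NULL) return;
  free(msg->payload);
  free(msg);
}

/* New-style callback expected by the reworked macro: (msg, param).
 * param carries per-thread context (for example a worker handle) or NULL when unused. */
static void demoDestroyMsgWrapper(void *arg, void *param) {
  if (param != NULL) {
    printf("dropping msg with thread context %p\n", param);
  }
  demoDestroyMsg((DemoMsg *)arg);
}

int main(void) {
  DemoMsg *msg = calloc(1, sizeof(DemoMsg));
  /* usage mirrors TRANS_DESTROY_ASYNC_POOL_MSG(pool, DemoMsg, demoDestroyMsgWrapper, NULL) */
  demoDestroyMsgWrapper(msg, NULL);
  return 0;
}
```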
#define ASYNC_CHECK_HANDLE(exh1, id) \
|
||||
|
|
|
@ -191,6 +191,15 @@ static void httpDestroyMsg(SHttpMsg* msg) {
|
|||
taosMemoryFree(msg->cont);
|
||||
taosMemoryFree(msg);
|
||||
}
|
||||
static void httpDestroyMsgWrapper(void* cont, void* param) {
|
||||
httpDestroyMsg((SHttpMsg*)cont);
|
||||
// if (msg == NULL) return;
|
||||
|
||||
// taosMemoryFree(msg->server);
|
||||
// taosMemoryFree(msg->uri);
|
||||
// taosMemoryFree(msg->cont);
|
||||
// taosMemoryFree(msg);
|
||||
}
|
||||
|
||||
static void httpMayDiscardMsg(SHttpModule* http, SAsyncItem* item) {
|
||||
SHttpMsg *msg = NULL, *quitMsg = NULL;
|
||||
|
@ -554,7 +563,7 @@ void transHttpEnvDestroy() {
|
|||
httpSendQuit();
|
||||
taosThreadJoin(load->thread, NULL);
|
||||
|
||||
TRANS_DESTROY_ASYNC_POOL_MSG(load->asyncPool, SHttpMsg, httpDestroyMsg);
|
||||
TRANS_DESTROY_ASYNC_POOL_MSG(load->asyncPool, SHttpMsg, httpDestroyMsgWrapper, NULL);
|
||||
transAsyncPoolDestroy(load->asyncPool);
|
||||
uv_loop_close(load->loop);
|
||||
taosMemoryFree(load->loop);
|
||||
|
|
|
@ -219,6 +219,8 @@ static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrd* pThrd) = {cliHandleReq,
|
|||
/// NULL,cliHandleUpdate};
|
||||
|
||||
static FORCE_INLINE void destroyCmsg(void* cmsg);
|
||||
|
||||
static FORCE_INLINE void destroyCmsgWrapper(void* arg, void* param);
|
||||
static FORCE_INLINE void destroyCmsgAndAhandle(void* cmsg);
|
||||
static FORCE_INLINE int cliRBChoseIdx(STrans* pTransInst);
|
||||
static FORCE_INLINE void transDestroyConnCtx(STransConnCtx* ctx);
|
||||
|
@ -582,8 +584,8 @@ void* destroyConnPool(SCliThrd* pThrd) {
|
|||
|
||||
static SCliConn* getConnFromPool(SCliThrd* pThrd, char* key, bool* exceed) {
|
||||
void* pool = pThrd->pool;
|
||||
SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key) + 1);
|
||||
STrans* pTranInst = pThrd->pTransInst;
|
||||
SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key) + 1);
|
||||
if (plist == NULL) {
|
||||
SConnList list = {0};
|
||||
taosHashPut((SHashObj*)pool, key, strlen(key) + 1, (void*)&list, sizeof(list));
|
||||
|
@ -865,17 +867,18 @@ static void cliDestroyConn(SCliConn* conn, bool clear) {
|
|||
QUEUE_INIT(&conn->q);
|
||||
|
||||
conn->broken = true;
|
||||
if (conn->list == NULL) {
|
||||
conn->list = taosHashGet((SHashObj*)pThrd->pool, conn->dstAddr, strlen(conn->dstAddr));
|
||||
}
|
||||
|
||||
if (conn->list != NULL) {
|
||||
SConnList* connList = conn->list;
|
||||
connList->list->numOfConn--;
|
||||
connList->size--;
|
||||
} else {
|
||||
if (pThrd->pool) {
|
||||
SConnList* connList = taosHashGet((SHashObj*)pThrd->pool, conn->dstAddr, strlen(conn->dstAddr) + 1);
|
||||
if (connList != NULL) connList->list->numOfConn--;
|
||||
if (conn->list) {
|
||||
SConnList* list = conn->list;
|
||||
list->list->numOfConn--;
|
||||
if (conn->status == ConnInPool) {
|
||||
list->size--;
|
||||
}
|
||||
}
|
||||
|
||||
conn->list = NULL;
|
||||
pThrd->newConnCount--;
|
||||
|
||||
|
@ -1963,7 +1966,17 @@ static FORCE_INLINE void destroyCmsg(void* arg) {
|
|||
transFreeMsg(pMsg->msg.pCont);
|
||||
taosMemoryFree(pMsg);
|
||||
}
|
||||
|
||||
static FORCE_INLINE void destroyCmsgWrapper(void* arg, void* param) {
|
||||
SCliMsg* pMsg = arg;
|
||||
if (pMsg == NULL) {
|
||||
return;
|
||||
}
|
||||
if (param != NULL) {
|
||||
SCliThrd* pThrd = param;
|
||||
if (pThrd->destroyAhandleFp) (*pThrd->destroyAhandleFp)(pMsg->msg.info.ahandle);
|
||||
}
|
||||
destroyCmsg(pMsg);
|
||||
}
|
||||
static FORCE_INLINE void destroyCmsgAndAhandle(void* param) {
|
||||
if (param == NULL) return;
|
||||
|
||||
|
@ -2057,7 +2070,7 @@ static void destroyThrdObj(SCliThrd* pThrd) {
|
|||
taosThreadJoin(pThrd->thread, NULL);
|
||||
CLI_RELEASE_UV(pThrd->loop);
|
||||
taosThreadMutexDestroy(&pThrd->msgMtx);
|
||||
TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SCliMsg, destroyCmsg);
|
||||
TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SCliMsg, destroyCmsgWrapper, (void*)pThrd);
|
||||
transAsyncPoolDestroy(pThrd->asyncPool);
|
||||
|
||||
transDQDestroy(pThrd->delayQueue, destroyCmsgAndAhandle);
|
||||
|
|
|
@ -527,6 +527,10 @@ void uvOnSendCb(uv_write_t* req, int status) {
|
|||
if (!transQueueEmpty(&conn->srvMsgs)) {
|
||||
msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0);
|
||||
if (msg->type == Register && conn->status == ConnAcquire) {
|
||||
if (conn->regArg.init) {
|
||||
transFreeMsg(conn->regArg.msg.pCont);
|
||||
conn->regArg.init = 0;
|
||||
}
|
||||
conn->regArg.notifyCount = 0;
|
||||
conn->regArg.init = 1;
|
||||
conn->regArg.msg = msg->msg;
|
||||
|
@ -671,7 +675,8 @@ static FORCE_INLINE void destroySmsg(SSvrMsg* smsg) {
|
|||
transFreeMsg(smsg->msg.pCont);
|
||||
taosMemoryFree(smsg);
|
||||
}
|
||||
static void destroyAllConn(SWorkThrd* pThrd) {
|
||||
static FORCE_INLINE void destroySmsgWrapper(void* smsg, void* param) { destroySmsg((SSvrMsg*)smsg); }
|
||||
static void destroyAllConn(SWorkThrd* pThrd) {
|
||||
tTrace("thread %p destroy all conn ", pThrd);
|
||||
while (!QUEUE_IS_EMPTY(&pThrd->conn)) {
|
||||
queue* h = QUEUE_HEAD(&pThrd->conn);
|
||||
|
@ -1349,6 +1354,11 @@ void uvHandleRegister(SSvrMsg* msg, SWorkThrd* thrd) {
|
|||
return;
|
||||
}
|
||||
transQueuePop(&conn->srvMsgs);
|
||||
|
||||
if (conn->regArg.init) {
|
||||
transFreeMsg(conn->regArg.msg.pCont);
|
||||
conn->regArg.init = 0;
|
||||
}
|
||||
conn->regArg.notifyCount = 0;
|
||||
conn->regArg.init = 1;
|
||||
conn->regArg.msg = msg->msg;
|
||||
|
@ -1394,7 +1404,7 @@ void destroyWorkThrd(SWorkThrd* pThrd) {
|
|||
}
|
||||
taosThreadJoin(pThrd->thread, NULL);
|
||||
SRV_RELEASE_UV(pThrd->loop);
|
||||
TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsg);
|
||||
TRANS_DESTROY_ASYNC_POOL_MSG(pThrd->asyncPool, SSvrMsg, destroySmsgWrapper, NULL);
|
||||
transAsyncPoolDestroy(pThrd->asyncPool);
|
||||
|
||||
uvWhiteListDestroy(pThrd->pWhiteList);
|
||||
|
|
|
@@ -89,12 +89,14 @@ static int32_t taosArrayResize(SArray* pArray) {
int32_t taosArrayEnsureCap(SArray* pArray, size_t newCap) {
  if (newCap > pArray->capacity) {
    float factor = BOUNDARY_BIG_FACTOR;
    if(newCap * pArray->elemSize > BOUNDARY_SIZE){
    if (newCap * pArray->elemSize > BOUNDARY_SIZE) {
      factor = BOUNDARY_SMALL_FACTOR;
    }

    size_t tsize = (pArray->capacity * factor);
    while (newCap > tsize) {
      tsize = (tsize * factor);
      size_t newSize = (tsize * factor);
      tsize = (newSize == tsize) ? (tsize + 2) : newSize;
    }

    pArray->pData = taosMemoryRealloc(pArray->pData, tsize * pArray->elemSize);
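The hunk above guards the capacity loop against a growth step that makes no progress (a small `tsize` times the factor can truncate back to the same value), bumping by 2 in that case so the loop terminates. A standalone sketch of the fixed loop with an illustrative factor (the real `BOUNDARY_*` constants are not restated here):

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative growth factor; the library picks a bigger or smaller factor
 * depending on whether the requested size crosses a byte-size boundary. */
#define GROWTH_FACTOR 1.2f

static size_t ensureCapSketch(size_t capacity, size_t newCap) {
  float  factor = GROWTH_FACTOR;
  size_t tsize  = (size_t)(capacity * factor);
  while (newCap > tsize) {
    size_t newSize = (size_t)(tsize * factor);
    /* if the multiply made no progress (e.g. 1 * 1.2 truncates back to 1),
     * force progress so the loop terminates */
    tsize = (newSize == tsize) ? (tsize + 2) : newSize;
  }
  return tsize;
}

int main(void) {
  /* capacity 1 with a 1.2 factor: the unguarded loop would stall at tsize == 1 */
  printf("grown to %zu\n", ensureCapSketch(1, 8));
  return 0;
}
```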
@ -457,6 +457,7 @@ static int32_t cfgAddItem(SConfig *pCfg, SConfigItem *pItem, const char *name) {
|
|||
if (pItem->dtype == CFG_DTYPE_STRING) {
|
||||
taosMemoryFree(pItem->str);
|
||||
}
|
||||
|
||||
taosMemoryFree(pItem->name);
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
|
|
|
@ -329,6 +329,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_VIEW_NOT_EXIST, "view not exists in db
|
|||
|
||||
//mnode-compact
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_COMPACT_ID, "Invalid compact id")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_MND_COMPACT_DETAIL_NOT_EXIST, "compact detail doesn't exist")
|
||||
|
||||
// dnode
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_DNODE_OFFLINE, "Dnode is offline")
|
||||
|
|
|
@ -21,6 +21,40 @@
|
|||
int64_t tsRpcQueueMemoryAllowed = 0;
|
||||
int64_t tsRpcQueueMemoryUsed = 0;
|
||||
|
||||
struct STaosQueue {
|
||||
STaosQnode *head;
|
||||
STaosQnode *tail;
|
||||
STaosQueue *next; // for queue set
|
||||
STaosQset *qset; // for queue set
|
||||
void *ahandle; // for queue set
|
||||
FItem itemFp;
|
||||
FItems itemsFp;
|
||||
TdThreadMutex mutex;
|
||||
int64_t memOfItems;
|
||||
int32_t numOfItems;
|
||||
int64_t threadId;
|
||||
int64_t memLimit;
|
||||
int64_t itemLimit;
|
||||
};
|
||||
|
||||
struct STaosQset {
|
||||
STaosQueue *head;
|
||||
STaosQueue *current;
|
||||
TdThreadMutex mutex;
|
||||
tsem_t sem;
|
||||
int32_t numOfQueues;
|
||||
int32_t numOfItems;
|
||||
};
|
||||
|
||||
struct STaosQall {
|
||||
STaosQnode *current;
|
||||
STaosQnode *start;
|
||||
int32_t numOfItems;
|
||||
int64_t memOfItems;
|
||||
int32_t unAccessedNumOfItems;
|
||||
int64_t unAccessMemOfItems;
|
||||
};
|
||||
|
||||
void taosSetQueueMemoryCapacity(STaosQueue *queue, int64_t cap) { queue->memLimit = cap; }
|
||||
void taosSetQueueCapacity(STaosQueue *queue, int64_t size) { queue->itemLimit = size; }
|
||||
|
||||
|
@ -497,6 +531,12 @@ int64_t taosQallUnAccessedMemSize(STaosQall *qall) { return qall->unAccessMemOfI
|
|||
void taosResetQitems(STaosQall *qall) { qall->current = qall->start; }
|
||||
int32_t taosGetQueueNumber(STaosQset *qset) { return qset->numOfQueues; }
|
||||
|
||||
void taosQueueSetThreadId(STaosQueue* pQueue, int64_t threadId) {
|
||||
pQueue->threadId = threadId;
|
||||
}
|
||||
|
||||
int64_t taosQueueGetThreadId(STaosQueue *pQueue) { return pQueue->threadId; }
|
||||
|
||||
#if 0
|
||||
|
||||
void taosResetQsetThread(STaosQset *qset, void *pItem) {
|
||||
|
|
|
@ -20,6 +20,9 @@
|
|||
|
||||
#define DEFAULT_GROWTH 2
|
||||
#define DEFAULT_TIGHTENING_RATIO 0.5
|
||||
#define DEFAULT_MAX_BLOOMFILTERS 4
|
||||
#define SBF_INVALID -1
|
||||
#define SBF_VALID 0
|
||||
|
||||
static SBloomFilter *tScalableBfAddFilter(SScalableBf *pSBf, uint64_t expectedEntries, double errorRate);
|
||||
|
||||
|
@ -32,6 +35,8 @@ SScalableBf *tScalableBfInit(uint64_t expectedEntries, double errorRate) {
|
|||
if (pSBf == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
pSBf->maxBloomFilters = DEFAULT_MAX_BLOOMFILTERS;
|
||||
pSBf->status = SBF_VALID;
|
||||
pSBf->numBits = 0;
|
||||
pSBf->bfArray = taosArrayInit(defaultSize, sizeof(void *));
|
||||
if (tScalableBfAddFilter(pSBf, expectedEntries, errorRate * DEFAULT_TIGHTENING_RATIO) == NULL) {
|
||||
|
@ -45,6 +50,9 @@ SScalableBf *tScalableBfInit(uint64_t expectedEntries, double errorRate) {
|
|||
}
|
||||
|
||||
int32_t tScalableBfPutNoCheck(SScalableBf *pSBf, const void *keyBuf, uint32_t len) {
|
||||
if (pSBf->status == SBF_INVALID) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
int32_t size = taosArrayGetSize(pSBf->bfArray);
|
||||
SBloomFilter *pNormalBf = taosArrayGetP(pSBf->bfArray, size - 1);
|
||||
ASSERT(pNormalBf);
|
||||
|
@ -52,6 +60,7 @@ int32_t tScalableBfPutNoCheck(SScalableBf *pSBf, const void *keyBuf, uint32_t le
|
|||
pNormalBf = tScalableBfAddFilter(pSBf, pNormalBf->expectedEntries * pSBf->growth,
|
||||
pNormalBf->errorRate * DEFAULT_TIGHTENING_RATIO);
|
||||
if (pNormalBf == NULL) {
|
||||
pSBf->status = SBF_INVALID;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
@ -59,6 +68,9 @@ int32_t tScalableBfPutNoCheck(SScalableBf *pSBf, const void *keyBuf, uint32_t le
|
|||
}
|
||||
|
||||
int32_t tScalableBfPut(SScalableBf *pSBf, const void *keyBuf, uint32_t len) {
|
||||
if (pSBf->status == SBF_INVALID) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
uint64_t h1 = (uint64_t)pSBf->hashFn1(keyBuf, len);
|
||||
uint64_t h2 = (uint64_t)pSBf->hashFn2(keyBuf, len);
|
||||
int32_t size = taosArrayGetSize(pSBf->bfArray);
|
||||
|
@ -74,6 +86,7 @@ int32_t tScalableBfPut(SScalableBf *pSBf, const void *keyBuf, uint32_t len) {
|
|||
pNormalBf = tScalableBfAddFilter(pSBf, pNormalBf->expectedEntries * pSBf->growth,
|
||||
pNormalBf->errorRate * DEFAULT_TIGHTENING_RATIO);
|
||||
if (pNormalBf == NULL) {
|
||||
pSBf->status = SBF_INVALID;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
@ -81,6 +94,9 @@ int32_t tScalableBfPut(SScalableBf *pSBf, const void *keyBuf, uint32_t len) {
|
|||
}
|
||||
|
||||
int32_t tScalableBfNoContain(const SScalableBf *pSBf, const void *keyBuf, uint32_t len) {
|
||||
if (pSBf->status == SBF_INVALID) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
uint64_t h1 = (uint64_t)pSBf->hashFn1(keyBuf, len);
|
||||
uint64_t h2 = (uint64_t)pSBf->hashFn2(keyBuf, len);
|
||||
int32_t size = taosArrayGetSize(pSBf->bfArray);
|
||||
|
@ -93,6 +109,10 @@ int32_t tScalableBfNoContain(const SScalableBf *pSBf, const void *keyBuf, uint32
|
|||
}
|
||||
|
||||
static SBloomFilter *tScalableBfAddFilter(SScalableBf *pSBf, uint64_t expectedEntries, double errorRate) {
|
||||
if (taosArrayGetSize(pSBf->bfArray) >= pSBf->maxBloomFilters) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SBloomFilter *pNormalBf = tBloomFilterInit(expectedEntries, errorRate);
|
||||
if (pNormalBf == NULL) {
|
||||
return NULL;
|
||||
|
@ -128,6 +148,8 @@ int32_t tScalableBfEncode(const SScalableBf *pSBf, SEncoder *pEncoder) {
|
|||
}
|
||||
if (tEncodeU32(pEncoder, pSBf->growth) < 0) return -1;
|
||||
if (tEncodeU64(pEncoder, pSBf->numBits) < 0) return -1;
|
||||
if (tEncodeU32(pEncoder, pSBf->maxBloomFilters) < 0) return -1;
|
||||
if (tEncodeI8(pEncoder, pSBf->status) < 0) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -150,6 +172,8 @@ SScalableBf *tScalableBfDecode(SDecoder *pDecoder) {
|
|||
}
|
||||
if (tDecodeU32(pDecoder, &pSBf->growth) < 0) goto _error;
|
||||
if (tDecodeU64(pDecoder, &pSBf->numBits) < 0) goto _error;
|
||||
if (tDecodeU32(pDecoder, &pSBf->maxBloomFilters) < 0) goto _error;
|
||||
if (tDecodeI8(pDecoder, &pSBf->status) < 0) goto _error;
|
||||
return pSBf;
|
||||
|
||||
_error:
|
||||
|
|
|
@ -417,9 +417,9 @@ _OVER:
|
|||
return NULL;
|
||||
} else {
|
||||
while (worker->pid <= 0) taosMsleep(10);
|
||||
queue->threadId = worker->pid;
|
||||
uInfo("worker:%s, queue:%p is allocated, ahandle:%p thread:%08" PRId64, pool->name, queue, ahandle,
|
||||
queue->threadId);
|
||||
|
||||
taosQueueSetThreadId(queue, worker->pid);
|
||||
uInfo("worker:%s, queue:%p is allocated, ahandle:%p thread:%08" PRId64, pool->name, queue, ahandle, worker->pid);
|
||||
return queue;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -46,7 +46,9 @@ class TDTestCase(TBase):
|
|||
# clusterDnodes.starttaosd(1)
|
||||
# time.sleep(5)
|
||||
autoGen.insert_data(5000, True)
|
||||
tdSql.execute(f"flush database {self.db}")
|
||||
self.flushDb(True)
|
||||
# wait flush operation over
|
||||
time.sleep(5)
|
||||
|
||||
# sql = 'show vnodes;'
|
||||
# while True:
|
||||
|
|
|
@ -115,7 +115,7 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/ins_topics_test.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqMaxTopic.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqParamsTest.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqParamsTest.py -R
|
||||
#,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqParamsTest.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqClientConsLog.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqMaxGroupIds.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsumeDiscontinuousData.py
|
||||
|
@ -228,7 +228,7 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_taosx.py
|
||||
,,n,system-test,python3 ./test.py -f 7-tmq/tmq_taosx.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_replay.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqSeekAndCommit.py
|
||||
,,n,system-test,python3 ./test.py -f 7-tmq/tmq_offset.py
|
||||
|
@ -238,7 +238,8 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqSubscribeStb-r3.py -N 5
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -i True
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -n 3 -i True
|
||||
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform.py -N 2 -n 1
|
||||
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-db-removewal.py -N 2 -n 1
|
||||
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb-removewal.py -N 6 -n 3
|
||||
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 2 -n 1
|
||||
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 6 -n 3
|
||||
#,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-db.py -N 6 -n 3
|
||||
|
@ -1085,6 +1086,7 @@
|
|||
,,y,script,./test.sh -f tsim/parser/join_multivnode.sim
|
||||
,,y,script,./test.sh -f tsim/parser/join.sim
|
||||
,,y,script,./test.sh -f tsim/parser/last_cache.sim
|
||||
,,y,script,./test.sh -f tsim/parser/last_both.sim
|
||||
,,y,script,./test.sh -f tsim/parser/last_groupby.sim
|
||||
,,y,script,./test.sh -f tsim/parser/lastrow.sim
|
||||
,,y,script,./test.sh -f tsim/parser/lastrow2.sim
|
||||
|
@ -1211,6 +1213,7 @@
|
|||
,,y,script,./test.sh -f tsim/stream/deleteState.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeInterval0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeMultiLevelInterval0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeSession0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/drop_stream.sim
|
||||
,,y,script,./test.sh -f tsim/stream/event0.sim
|
||||
|
|
|
@ -214,7 +214,7 @@ function lcovFunc {
|
|||
'*/clientJniConnector.c' '*/clientTmqConnector.c' '*/version.c' '*/build_version.cc'\
|
||||
'*/tthread.c' '*/tversion.c' '*/ctgDbg.c' '*/schDbg.c' '*/qwDbg.c' '*/tencode.h' \
|
||||
'*/shellAuto.c' '*/shellTire.c' '*/shellCommand.c'\
|
||||
'*/sql.c' '*/sql.y'\
|
||||
'*/sql.c' '*/sql.y' '*/smaSnapshot.c' '*/smaCommit.c'\
|
||||
--branch-coverage --function-coverage -o coverage.info
|
||||
|
||||
# generate result
|