Merge remote-tracking branch 'origin/3.0' into feat/TD-24700
commit 0546bdceb2

@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.1.0.0.alpha")
SET(TD_VER_NUMBER "3.1.1.0.alpha")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@@ -98,7 +98,7 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
| 21 | cachesize | INT | Memory per vnode used for caching the newest data. It should be noted that `cachesize` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 22 | wal_level | INT | WAL level. It should be noted that `wal_level` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 23 | wal_fsync_period | INT | Interval at which WAL is written to disk. It should be noted that `wal_fsync_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 24 | wal_retention_period | INT | WAL retention period. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 24 | wal_retention_period | INT | WAL retention period, in seconds. It should be noted that `wal_retention_period` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 25 | wal_retention_size | INT | Maximum WAL size. It should be noted that `wal_retention_size` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 26 | stt_trigger | SMALLINT | The threshold number of files that triggers file merging. It should be noted that `stt_trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 27 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing tables to vnodes based on table name. It should be noted that `table_prefix` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
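
Several of these columns are named after TDengine keywords, so they must be wrapped in backquotes when selected explicitly. The snippet below is a minimal sketch using the C client; the host, port, and credentials are placeholder defaults rather than values taken from this change.

```c
// Sketch: selecting keyword-named columns from INS_DATABASES with backquote escaping.
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);  // placeholder connection
  if (conn == NULL) return 1;

  // wal_level, wal_fsync_period, wal_retention_period, stt_trigger are keywords -> backquoted.
  TAOS_RES *res = taos_query(conn,
      "SELECT name, `wal_level`, `wal_fsync_period`, `wal_retention_period`, `stt_trigger` "
      "FROM information_schema.ins_databases");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_FIELD *fields = taos_fetch_fields(res);
    int numFields = taos_num_fields(res);
    TAOS_ROW row;
    char line[1024];
    while ((row = taos_fetch_row(res)) != NULL) {
      taos_print_row(line, row, fields, numFields);  // formats one row as text
      printf("%s\n", line);
    }
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```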

@@ -297,3 +297,13 @@ Provides dnode configuration information.
| 7 | target_table | BINARY(192) | Target table |
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |

## INS_USER_PRIVILEGES

| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | -------------------------------------------|
| 1 | user_name | VARCHAR(24) | Username |
| 2 | privilege | VARCHAR(10) | Privilege description |
| 3 | db_name | VARCHAR(65) | Database name |
| 4 | table_name | VARCHAR(193) | Table name |
| 5 | condition | VARCHAR(49152) | The privilege filter for child tables |

@@ -79,6 +79,12 @@ Parameter Description:
- tz: Optional parameter that specifies the timezone of the returned time, following the IANA Time Zone rules, e.g. `America/New_York`.
- req_id: Optional parameter that specifies the request id for tracing.

:::note

URL encoding: make sure that parameters are properly encoded. For example, when specifying a timezone you must encode its special characters. `?tz=Etc/GMT+10` will not work because the plus sign (`+`) is interpreted as a space in a URL. It is best practice to encode all special characters in a parameter; use `?tz=Etc%2FGMT%2B10` instead.

:::

For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`.

TDengine supports both Basic authentication and custom authentication mechanisms, and subsequent versions will provide a standard secure digital signature mechanism for authentication.
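
As a rough illustration of the encoding rule above, the sketch below issues one REST request with libcurl from C. The host (`localhost`), port (6041), credentials (`root:taosdata`), and database name (`test`) are assumed defaults, not values from this change; the already-encoded `tz` value is the only point of interest.

```c
// Sketch: REST request with a percent-encoded tz parameter (Etc/GMT+10 -> Etc%2FGMT%2B10).
#include <stdio.h>
#include <curl/curl.h>

int main(void) {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *curl = curl_easy_init();
  if (curl == NULL) return 1;

  // The '+' and '/' in the timezone are encoded so they survive URL parsing.
  curl_easy_setopt(curl, CURLOPT_URL,
                   "http://localhost:6041/rest/sql/test?tz=Etc%2FGMT%2B10");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "root:taosdata");               // Basic authentication
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "select server_version()");  // SQL goes in the body

  CURLcode rc = curl_easy_perform(curl);  // response JSON is written to stdout by default
  if (rc != CURLE_OK) {
    fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));
  }

  curl_easy_cleanup(curl);
  curl_global_cleanup();
  return 0;
}
```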

@@ -670,6 +670,15 @@ The charset that takes effect is UTF-8.
| Value Range | 0: not consistent; 1: consistent. |
| Default | 0 |

### smlTsDefaultName

| Attribute | Description |
| -------- | -------------------------------------------------------- |
| Applicable | Client only |
| Meaning | Name of the timestamp column for tables created automatically by schemaless writing |
| Type | String |
| Default Value | _ts |

## Compress Parameters

### compressMsgSize

@@ -34,7 +34,27 @@ In the schemaless writing data line protocol, each data item in the field_set ne

- If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`.
- If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`.
- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character)
- Spaces, equal signs (=), commas (,), double quotes ("), and backslashes (\\) need to be escaped with a backslash (\\) in front. (All refer to the ASCII characters.) The rules are as follows; a short client-side example follows this list.

| **Serial number** | **Element** | **Escape characters** |
| -------- | ----------- | ----------------------------- |
| 1 | Measurement | Comma, Space |
| 2 | Tag key | Comma, Equals Sign, Space |
| 3 | Tag value | Comma, Equals Sign, Space |
| 4 | Field key | Comma, Equals Sign, Space |
| 5 | Field value | Double quote, Backslash |

When two backslashes appear in a row, the first acts as the escape character; a single backslash needs no escaping. Examples of the backslash escape rules are as follows:

| **Serial number** | **Backslashes** | **Interpreted as** |
| -------- | ----------- | ----------------------------- |
| 1 | \ | \ |
| 2 | \\\\ | \ |
| 3 | \\\\\\ | \\\\ |
| 4 | \\\\\\\\ | \\\\ |
| 5 | \\\\\\\\\\ | \\\\\\ |
| 6 | \\\\\\\\\\\\ | \\\\\\ |

- Numeric types are distinguished by a suffix:

| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
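
A minimal sketch of how an escaped line-protocol record is written through the C client follows; the connection parameters and database name are assumptions, and the layered C-string escaping is only there to produce the protocol-level backslashes described above. If the client-side smlTsDefaultName option introduced in this change is set, the auto-created timestamp column uses that name instead of `_ts`.

```c
// Sketch: schemaless write whose measurement, tag value, and field value use the escape rules above.
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 6030);  // assumes db `test` exists
  if (conn == NULL) return 1;

  // The C escapes below produce this protocol line:
  //   st\,dev,t1=host\ 01,t2=lab c1=3i64,c2=false,c3="say \"hi\"",c4=4f64 1626006833639000000
  // i.e. an escaped comma in the measurement, an escaped space in a tag value,
  // and an escaped double quote inside a BINARY field value.
  char *lines[] = {
      "st\\,dev,t1=host\\ 01,t2=lab c1=3i64,c2=false,c3=\"say \\\"hi\\\"\",c4=4f64 1626006833639000000"};

  TAOS_RES *res = taos_schemaless_insert(conn, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```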

@@ -88,6 +108,8 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam

8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.
Note: TDengine 3.0.3.0 and later automatically detect whether the order is consistent. This parameter is no longer used.
9. Because SQL table names do not support the period (.), schemaless writing also handles periods: if a table name automatically created by schemaless writing contains a period (.), it is automatically replaced with an underscore (\_). If a manually specified subtable name contains a period (.), it is likewise converted to an underscore (\_).
10. taos.cfg adds the smlTsDefaultName configuration (a string value), which takes effect only on the client side. When set, it specifies the name of the timestamp column for tables created automatically by schemaless writing; if not set, the column name defaults to _ts.

:::tip
All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures, such as the total length of each row of data, which cannot exceed 48 KB (64 KB since version 3.0.5.0), and the total length of a tag value, which cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.

@@ -338,7 +338,7 @@ Remark:
Equivalent function: sum

```sql
Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
Select sum(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
```

Note: This function has no interpolation requirement, so it can be calculated directly.

@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t

import Release from "/components/ReleaseV3";

## 3.1.0.0

<Release type="tdengine" version="3.1.0.0" />

## 3.0.7.1

<Release type="tdengine" version="3.0.7.1" />

@@ -98,7 +98,7 @@ TDengine has a built-in database named `INFORMATION_SCHEMA`, which provides
| 21 | cachesize | INT | Memory size per vnode used to cache the most recent data of child tables. Note that `cachesize` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 22 | wal_level | INT | WAL level. Note that `wal_level` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 23 | wal_fsync_period | INT | Interval at which data is flushed to disk. Note that `wal_fsync_period` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 24 | wal_retention_period | INT | WAL retention period. Note that `wal_retention_period` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 24 | wal_retention_period | INT | WAL retention period, in seconds. Note that `wal_retention_period` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 25 | wal_retention_size | INT | Maximum size of retained WAL. Note that `wal_retention_size` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 26 | stt_trigger | SMALLINT | Number of on-disk files that triggers file merging. Note that `stt_trigger` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 27 | table_prefix | SMALLINT | Length of the table-name prefix that the internal storage engine ignores when assigning a table to a vnode based on its name. Note that `table_prefix` is a TDengine keyword and must be escaped with ` when used as a column name. |

@@ -298,3 +298,13 @@ TDengine has a built-in database named `INFORMATION_SCHEMA`, which provides
| 7 | target_table | BINARY(192) | Target table that the stream writes to |
| 8 | watermark | BIGINT | Watermark; see the stream processing section of the SQL manual. Note that `watermark` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | trigger | INT | Mode for pushing computation results; see the stream processing section of the SQL manual. Note that `trigger` is a TDengine keyword and must be escaped with ` when used as a column name. |

## INS_USER_PRIVILEGES

| # | **Column** | **Data Type** | **Description** |
| --- | :----------: | ------------ | -------------------------------------------------------------------------------------------------------------------- |
| 1 | user_name | VARCHAR(24) | Username |
| 2 | privilege | VARCHAR(10) | Privilege description |
| 3 | db_name | VARCHAR(65) | Database name |
| 4 | table_name | VARCHAR(193) | Table name |
| 5 | condition | VARCHAR(49152) | Privilege filter condition for child tables |

@@ -685,7 +685,16 @@ The valid value of charset is UTF-8.
| Applicable | Client only |
| Meaning | Whether schemaless column data is in a consistent order; deprecated since 3.0.3.0 |
| Value Range | 0: not consistent; 1: consistent |
| Default | 0 |
| Default | 0 |

### smlTsDefaultName

| Attribute | Description |
| -------- | -------------------------------------------------------- |
| Applicable | Client only |
| Meaning | Name of the timestamp column for tables created automatically by schemaless writing |
| Type | String |
| Default | _ts |

## Other

@@ -35,12 +35,32 @@ All data in tag_set is automatically converted to the nchar data type and does not

- If the value is enclosed in double quotes, it is of type BINARY(32). For example, `"abc"`.
- If the value is enclosed in double quotes and prefixed with L, it is of type NCHAR(32). For example, `L"error message"`.
- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\) in front. (All refer to the ASCII characters.)
- Spaces, equal signs (=), commas (,), double quotes ("), and backslashes (\) need to be escaped with a backslash (\) in front. (All refer to the ASCII characters.) The specific escape rules are as follows:

| **No.** | **Element** | **Characters to escape** |
| -------- | ----------- | ----------------------------- |
| 1 | Supertable name | Comma, Space |
| 2 | Tag key | Comma, Equals sign, Space |
| 3 | Tag value | Comma, Equals sign, Space |
| 4 | Column name | Comma, Equals sign, Space |
| 5 | Column value | Double quote, Backslash |

When two backslashes appear in a row, the first acts as the escape character; a single backslash needs no escaping. Examples of the backslash escape rules are as follows:

| **No.** | **Backslashes** | **Interpreted as** |
| -------- | ----------- | ----------------------------- |
| 1 | \ | \ |
| 2 | \\\\ | \ |
| 3 | \\\\\\ | \\\\ |
| 4 | \\\\\\\\ | \\\\ |
| 5 | \\\\\\\\\\ | \\\\\\ |
| 6 | \\\\\\\\\\\\ | \\\\\\ |

- Numeric types are distinguished by a suffix:

| **No.** | **Suffix** | **Mapped type** | **Size (bytes)** |
| **No.** | **Suffix** | **Mapped type** | **Size (bytes)** |
| -------- | ----------- | ----------------------------- | -------------- |
| 1 | none or f64 | double | 8 |
| 1 | none or f64 | double | 8 |
| 2 | f32 | float | 4 |
| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
| 4 | i16/u16 | SmallInt/USmallInt | 2 |

@@ -84,7 +104,9 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
6. For BINARY or NCHAR columns, if the length of a value in a data row exceeds the limit of the column type, the maximum character length the column can store is automatically increased (it only grows, never shrinks) to ensure complete storage of the data.
7. Any error encountered during processing interrupts the write and returns an error code.
8. To improve write efficiency, it is assumed by default that the order of field_set within a supertable is the same (the first record contains all fields and later records follow that order). If the order differs, set the smlDataFormat parameter to false; otherwise data is written assuming the same order and the data in the database will be abnormal. Starting from 3.0.3.0, order consistency is detected automatically and this configuration is deprecated.

9. Because SQL table names do not support the period (.), schemaless writing also handles periods: if a table name automatically created by schemaless writing contains a period (.), it is automatically replaced with an underscore (\_). If a manually specified subtable name contains a period (.), it is likewise converted to an underscore (\_).
10. taos.cfg adds the smlTsDefaultName configuration (a string value), which takes effect only on the client side. When set, it specifies the name of the timestamp column for tables created automatically by schemaless writing; if not set, the column name defaults to _ts.

:::tip
All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures; for example, the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0), and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for the specific constraints.

@@ -371,7 +371,7 @@ Select min(val) from table_name
Equivalent function: sum

```sql
Select max(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
Select sum(value) from (select first(val) value from table_name interval(10s) fill(linear)) interval(10s)
```

Note: This function has no interpolation requirement, so it can be calculated directly.

@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do

import Release from "/components/ReleaseV3";

## 3.1.0.0

<Release type="tdengine" version="3.1.0.0" />

## 3.0.7.1

<Release type="tdengine" version="3.0.7.1" />

@@ -169,6 +169,8 @@ extern char tsUdfdLdLibPath[];
// schemaless
extern char tsSmlChildTableName[];
extern char tsSmlTagName[];
extern bool tsSmlDot2Underline;
extern char tsSmlTsDefaultName[];
// extern bool tsSmlDataFormat;
// extern int32_t tsSmlBatchSize;

@@ -225,13 +225,9 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo);
bool qStreamRecoverScanFinished(qTaskInfo_t tinfo);
bool qStreamRecoverScanStep1Finished(qTaskInfo_t tinfo);
bool qStreamRecoverScanStep2Finished(qTaskInfo_t tinfo);
int32_t qStreamRecoverSetAllStepFinished(qTaskInfo_t tinfo);
int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo);
void resetTaskInfo(qTaskInfo_t tinfo);

void qResetStreamInfoTimeWindow(qTaskInfo_t tinfo);

int32_t qStreamOperatorReleaseState(qTaskInfo_t tInfo);
int32_t qStreamOperatorReloadState(qTaskInfo_t tInfo);

@@ -368,6 +368,8 @@ typedef struct SStateStore {
bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);
void (*updateInfoDestroy)(SUpdateInfo* pInfo);
void (*windowSBfDelete)(SUpdateInfo *pInfo, uint64_t count);
void (*windowSBfAdd)(SUpdateInfo *pInfo, uint64_t count);

SUpdateInfo* (*updateInfoInitP)(SInterval* pInterval, int64_t watermark);
void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);

@@ -45,7 +45,6 @@ enum {
TASK_STATUS__FAIL,
TASK_STATUS__STOP,
TASK_STATUS__SCAN_HISTORY, // stream task scan history data by using tsdbread in the stream scanner
TASK_STATUS__SCAN_HISTORY_WAL, // scan history data in wal
TASK_STATUS__HALT, // pause, but not be manipulated by user command
TASK_STATUS__PAUSE, // pause
};

@@ -591,7 +590,7 @@ bool streamTaskIsIdle(const SStreamTask* pTask);
int32_t streamTaskEndScanWAL(SStreamTask* pTask);

SStreamChildEpInfo * streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz);
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize);

char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);

@@ -605,15 +604,10 @@ int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq*
int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp);
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask);
int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask);
int32_t streamStartRecoverTask(SStreamTask* pTask, int8_t igUntreated);
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated);
bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer);

bool streamTaskRecoverScanStep1Finished(SStreamTask* pTask);
bool streamTaskRecoverScanStep2Finished(SStreamTask* pTask);
int32_t streamTaskRecoverSetAllStepFinished(SStreamTask* pTask);

// common
int32_t streamSetParamForScanHistory(SStreamTask* pTask);
int32_t streamRestoreParam(SStreamTask* pTask);
int32_t streamSetStatusNormal(SStreamTask* pTask);
const char* streamGetTaskStatusStr(int32_t status);

@@ -627,7 +621,6 @@ void streamTaskEnablePause(SStreamTask* pTask);
// source level
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
int32_t streamBuildSourceRecover1Req(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated);
int32_t streamSourceScanHistoryData(SStreamTask* pTask);
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);

@@ -53,6 +53,8 @@ void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo);
int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo);
void windowSBfDelete(SUpdateInfo *pInfo, uint64_t count);
void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count);

#ifdef __cplusplus
}

@@ -781,6 +781,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TMQ_INVALID_TOPIC TAOS_DEF_ERROR_CODE(0, 0x4009)
#define TSDB_CODE_TMQ_NEED_INITIALIZED TAOS_DEF_ERROR_CODE(0, 0x4010)
#define TSDB_CODE_TMQ_NO_COMMITTED TAOS_DEF_ERROR_CODE(0, 0x4011)
#define TSDB_CODE_TMQ_SAME_COMMITTED_VALUE TAOS_DEF_ERROR_CODE(0, 0x4012)

// stream
#define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100)

@@ -200,7 +200,7 @@ typedef enum ELogicConditionType {
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
#define TSDB_DB_NAME_LEN 65
#define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
#define TSDB_PRIVILEDGE_CONDITION_LEN 200
#define TSDB_PRIVILEDGE_CONDITION_LEN 48*1024

#define TSDB_FUNC_NAME_LEN 65
#define TSDB_FUNC_COMMENT_LEN 1024 * 1024

@@ -369,8 +369,13 @@ typedef enum ELogicConditionType {
#define TSDB_DB_SCHEMALESS_OFF 0
#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF
#define TSDB_MIN_STT_TRIGGER 1
#define TSDB_MAX_STT_TRIGGER 16
#define TSDB_DEFAULT_SST_TRIGGER 2
#ifdef TD_ENTERPRISE
#define TSDB_MAX_STT_TRIGGER 16
#define TSDB_DEFAULT_SST_TRIGGER 2
#else
#define TSDB_MAX_STT_TRIGGER 1
#define TSDB_DEFAULT_SST_TRIGGER 1
#endif
#define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN)
#define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2)
#define TSDB_DEFAULT_HASH_PREFIX 0

@@ -64,8 +64,8 @@ extern "C" {
#define IS_INVALID_COL_LEN(len) ((len) <= 0 || (len) >= TSDB_COL_NAME_LEN)
#define IS_INVALID_TABLE_LEN(len) ((len) <= 0 || (len) >= TSDB_TABLE_NAME_LEN)

#define TS "_ts"
#define TS_LEN 3
//#define TS "_ts"
//#define TS_LEN 3
#define VALUE "_value"
#define VALUE_LEN 6

@@ -258,6 +258,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseJSON(SSmlHandle *info, char *payload);

void smlStrReplace(char* src, int32_t len);
#ifdef __cplusplus
}
#endif

@@ -104,7 +104,7 @@ static int32_t smlCheckAuth(SSmlHandle *info, SRequestConnInfo* conn, const cha
SUserAuthRes authRes = {0};

code = catalogChkAuth(info->pCatalog, conn, &pAuth, &authRes);

nodesDestroyNode(authRes.pCond);

return (code == TSDB_CODE_SUCCESS) ? (authRes.pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code;

@@ -114,6 +114,15 @@ inline bool smlDoubleToInt64OverFlow(double num) {
return false;
}

void smlStrReplace(char* src, int32_t len){
if (!tsSmlDot2Underline) return;
for(int i = 0; i < len; i++){
if(src[i] == '.'){
src[i] = '_';
}
}
}

int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2) {
if (pBuf->buf) {
memset(pBuf->buf, 0, pBuf->len);

@@ -193,6 +202,9 @@ static int32_t smlParseTableName(SArray *tags, char *childTableName) {
if (childTableNameLen == tag->keyLen && strncmp(tag->key, tsSmlChildTableName, tag->keyLen) == 0) {
memset(childTableName, 0, TSDB_TABLE_NAME_LEN);
strncpy(childTableName, tag->value, (tag->length < TSDB_TABLE_NAME_LEN ? tag->length : TSDB_TABLE_NAME_LEN));
if(tsSmlDot2Underline){
smlStrReplace(childTableName, strlen(childTableName));
}
taosArrayRemove(tags, i);
break;
}

@@ -838,6 +850,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
char *measure = taosMemoryMalloc(superTableLen);
memcpy(measure, superTable, superTableLen);
PROCESS_SLASH_IN_MEASUREMENT(measure, superTableLen);
smlStrReplace(measure, superTableLen);
memset(pName.tname, 0, TSDB_TABLE_NAME_LEN);
memcpy(pName.tname, measure, superTableLen);
taosMemoryFree(measure);

@@ -1051,7 +1064,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
taosMemoryFreeClear(sTableData->tableMeta);
sTableData->tableMeta = pTableMeta;
uDebug("SML:0x%" PRIx64 "modify schema uid:%" PRIu64 ", sversion:%d, tversion:%d", info->id, pTableMeta->uid,
pTableMeta->sversion, pTableMeta->tversion) tmp = (SSmlSTableMeta **)taosHashIterate(info->superTables, tmp);
pTableMeta->sversion, pTableMeta->tversion);
tmp = (SSmlSTableMeta **)taosHashIterate(info->superTables, tmp);
}
uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas end success, format:%d, needModifySchema:%d", info->id, info->dataFormat,
info->needModifySchema);

@@ -1394,7 +1408,14 @@ static int32_t smlInsertData(SSmlHandle *info) {
SSmlTableInfo **oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, NULL);
while (oneTable) {
SSmlTableInfo *tableData = *oneTable;
tstrncpy(pName.tname, tableData->sTableName, tableData->sTableNameLen + 1);

int measureLen = tableData->sTableNameLen;
char *measure = (char *)taosMemoryMalloc(tableData->sTableNameLen);
memcpy(measure, tableData->sTableName, tableData->sTableNameLen);
PROCESS_SLASH_IN_MEASUREMENT(measure, measureLen);
smlStrReplace(measure, measureLen);
memset(pName.tname, 0, TSDB_TABLE_NAME_LEN);
memcpy(pName.tname, measure, measureLen);

if (info->pRequest->tableList == NULL) {
info->pRequest->tableList = taosArrayInit(1, sizeof(SName));

@@ -1411,6 +1432,7 @@ static int32_t smlInsertData(SSmlHandle *info) {

code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE);
if(code != TSDB_CODE_SUCCESS){
taosMemoryFree(measure);
return code;
}

@@ -1418,6 +1440,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
code = catalogGetTableHashVgroup(info->pCatalog, &conn, &pName, &vg);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " catalogGetTableHashVgroup failed. table name: %s", info->id, tableData->childTableName);
taosMemoryFree(measure);
return code;
}
taosHashPut(info->pVgHash, (const char *)&vg.vgId, sizeof(vg.vgId), (char *)&vg, sizeof(vg));

@@ -1426,6 +1449,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
(SSmlSTableMeta **)taosHashGet(info->superTables, tableData->sTableName, tableData->sTableNameLen);
if (unlikely(NULL == pMeta || NULL == (*pMeta)->tableMeta)) {
uError("SML:0x%" PRIx64 " NULL == pMeta. table name: %s", info->id, tableData->childTableName);
taosMemoryFree(measure);
return TSDB_CODE_SML_INTERNAL_ERROR;
}

@@ -1435,11 +1459,6 @@ static int32_t smlInsertData(SSmlHandle *info) {
uDebug("SML:0x%" PRIx64 " smlInsertData table:%s, uid:%" PRIu64 ", format:%d", info->id, pName.tname,
tableData->uid, info->dataFormat);

int measureLen = tableData->sTableNameLen;
char *measure = (char *)taosMemoryMalloc(tableData->sTableNameLen);
memcpy(measure, tableData->sTableName, tableData->sTableNameLen);
PROCESS_SLASH_IN_MEASUREMENT(measure, measureLen);

code = smlBindData(info->pQuery, info->dataFormat, tableData->tags, (*pMeta)->cols, tableData->cols,
(*pMeta)->tableMeta, tableData->childTableName, measure, measureLen, info->ttl, info->msgBuf.buf,
info->msgBuf.len);

@@ -996,8 +996,8 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo
uError("OTD:0x%" PRIx64 " Unable to parse timestamp from JSON payload", info->id);
return TSDB_CODE_INVALID_TIMESTAMP;
}
SSmlKv kvTs = {.key = TS,
.keyLen = TS_LEN,
SSmlKv kvTs = {.key = tsSmlTsDefaultName,
.keyLen = strlen(tsSmlTsDefaultName),
.type = TSDB_DATA_TYPE_TIMESTAMP,
.i = ts,
.length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};

@@ -1200,8 +1200,8 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo *
return TSDB_CODE_INVALID_TIMESTAMP;
}
}
SSmlKv kvTs = {.key = TS,
.keyLen = TS_LEN,
SSmlKv kvTs = {.key = tsSmlTsDefaultName,
.keyLen = strlen(tsSmlTsDefaultName),
.type = TSDB_DATA_TYPE_TIMESTAMP,
.i = ts,
.length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};

@@ -157,6 +157,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
measure = (char *)taosMemoryMalloc(currElement->measureLen);
memcpy(measure, currElement->measure, currElement->measureLen);
PROCESS_SLASH_IN_MEASUREMENT(measure, measureLen);
smlStrReplace(measure, measureLen);
}
STableMeta *pTableMeta = smlGetMeta(info, measure, measureLen);
if (currElement->measureEscaped) {

@@ -365,6 +366,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
measure = (char *)taosMemoryMalloc(currElement->measureLen);
memcpy(measure, currElement->measure, currElement->measureLen);
PROCESS_SLASH_IN_MEASUREMENT(measure, measureLen);
smlStrReplace(measure, measureLen);
}
STableMeta *pTableMeta = smlGetMeta(info, measure, measureLen);
if (currElement->measureEscaped) {

@@ -651,8 +653,8 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
return TSDB_CODE_INVALID_TIMESTAMP;
}
// add ts to
SSmlKv kv = {.key = TS,
.keyLen = TS_LEN,
SSmlKv kv = {.key = tsSmlTsDefaultName,
.keyLen = strlen(tsSmlTsDefaultName),
.type = TSDB_DATA_TYPE_TIMESTAMP,
.i = ts,
.length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes,

@@ -260,8 +260,8 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", sql);
return TSDB_CODE_INVALID_TIMESTAMP;
}
SSmlKv kvTs = {.key = TS,
.keyLen = TS_LEN,
SSmlKv kvTs = {.key = tsSmlTsDefaultName,
.keyLen = strlen(tsSmlTsDefaultName),
.type = TSDB_DATA_TYPE_TIMESTAMP,
.i = ts,
.length = (size_t)tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes};

@@ -586,30 +586,36 @@ static int32_t asyncCommitOffset(tmq_t* tmq, char* pTopicName, int32_t vgId, STq
if(code != 0){
goto end;
}
if (offsetVal->type > 0 && !tOffsetEqual(offsetVal, &pVg->offsetInfo.committedOffset)) {
char offsetBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(offsetBuf, tListLen(offsetBuf), offsetVal);

char commitBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(commitBuf, tListLen(commitBuf), &pVg->offsetInfo.committedOffset);

SMqCommitCbParamSet* pParamSet = prepareCommitCbParamSet(tmq, pCommitFp, userParam, 0);
if (pParamSet == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
code = doSendCommitMsg(tmq, pVg->vgId, &pVg->epSet, offsetVal, pTopicName, pParamSet);
if (code != TSDB_CODE_SUCCESS) {
tscError("consumer:0x%" PRIx64 " topic:%s on vgId:%d end commit msg failed, send offset:%s committed:%s, code:%s",
tmq->consumerId, pTopicName, pVg->vgId, offsetBuf, commitBuf, tstrerror(terrno));
taosMemoryFree(pParamSet);
goto end;
}

tscInfo("consumer:0x%" PRIx64 " topic:%s on vgId:%d send commit msg success, send offset:%s committed:%s",
tmq->consumerId, pTopicName, pVg->vgId, offsetBuf, commitBuf);
pVg->offsetInfo.committedOffset = *offsetVal;
if (offsetVal->type <= 0) {
code = TSDB_CODE_TMQ_INVALID_MSG;
goto end;
}
if (tOffsetEqual(offsetVal, &pVg->offsetInfo.committedOffset)){
code = TSDB_CODE_TMQ_SAME_COMMITTED_VALUE;
goto end;
}
char offsetBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(offsetBuf, tListLen(offsetBuf), offsetVal);

char commitBuf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(commitBuf, tListLen(commitBuf), &pVg->offsetInfo.committedOffset);

SMqCommitCbParamSet* pParamSet = prepareCommitCbParamSet(tmq, pCommitFp, userParam, 0);
if (pParamSet == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
code = doSendCommitMsg(tmq, pVg->vgId, &pVg->epSet, offsetVal, pTopicName, pParamSet);
if (code != TSDB_CODE_SUCCESS) {
tscError("consumer:0x%" PRIx64 " topic:%s on vgId:%d end commit msg failed, send offset:%s committed:%s, code:%s",
tmq->consumerId, pTopicName, pVg->vgId, offsetBuf, commitBuf, tstrerror(terrno));
taosMemoryFree(pParamSet);
goto end;
}

tscInfo("consumer:0x%" PRIx64 " topic:%s on vgId:%d send commit msg success, send offset:%s committed:%s",
tmq->consumerId, pTopicName, pVg->vgId, offsetBuf, commitBuf);
pVg->offsetInfo.committedOffset = *offsetVal;

end:
taosRUnLockLatch(&tmq->lock);

@@ -650,7 +656,8 @@ static void asyncCommitFromResult(tmq_t* tmq, const TAOS_RES* pRes, tmq_commit_c
code = asyncCommitOffset(tmq, pTopicName, vgId, &offsetVal, pCommitFp, userParam);

end:
if(code != TSDB_CODE_SUCCESS){
if(code != TSDB_CODE_SUCCESS && pCommitFp != NULL){
if(code == TSDB_CODE_TMQ_SAME_COMMITTED_VALUE) code = TSDB_CODE_SUCCESS;
pCommitFp(tmq, code, userParam);
}
}

@@ -1859,8 +1866,8 @@ static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* p
static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal* rspOffset, int64_t sver, int64_t ever, int64_t consumerId){
if (!pVg->seekUpdated) {
tscDebug("consumer:0x%" PRIx64" local offset is update, since seekupdate not set", consumerId);
if(reqOffset->type != 0) pVg->offsetInfo.beginOffset = *reqOffset;
if(rspOffset->type != 0) pVg->offsetInfo.endOffset = *rspOffset;
pVg->offsetInfo.beginOffset = *reqOffset;
pVg->offsetInfo.endOffset = *rspOffset;
} else {
tscDebug("consumer:0x%" PRIx64" local offset is NOT update, since seekupdate is set", consumerId);
}

@@ -1948,7 +1955,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
}
taosWUnLockLatch(&tmq->lock);
} else {
tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tmq->consumerId, pollRspWrapper->vgId, pDataRsp->head.epoch, consumerEpoch);
pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
taosFreeQitem(pollRspWrapper);

@@ -1979,7 +1986,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
taosWUnLockLatch(&tmq->lock);
return pRsp;
} else {
tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tmq->consumerId, pollRspWrapper->vgId, pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
taosFreeQitem(pollRspWrapper);

@@ -2036,7 +2043,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
taosWUnLockLatch(&tmq->lock);
return pRsp;
} else {
tscDebug("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tscInfo("consumer:0x%" PRIx64 " vgId:%d msg discard since epoch mismatch: msg epoch %d, consumer epoch %d",
tmq->consumerId, pollRspWrapper->vgId, pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
pRspWrapper = tmqFreeRspWrapper(pRspWrapper);
taosFreeQitem(pollRspWrapper);

@@ -2350,7 +2357,7 @@ int32_t tmq_commit_sync(tmq_t* tmq, const TAOS_RES* pRes) {
tsem_destroy(&pInfo->sem);
taosMemoryFree(pInfo);

tscDebug("consumer:0x%" PRIx64 " sync commit done, code:%s", tmq->consumerId, tstrerror(code));
tscInfo("consumer:0x%" PRIx64 " sync res commit done, code:%s", tmq->consumerId, tstrerror(code));
return code;
}

@@ -2406,15 +2413,17 @@ int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId,
tsem_init(&pInfo->sem, 0, 0);
pInfo->code = 0;

asyncCommitOffset(tmq, tname, vgId, &offsetVal, commitCallBackFn, pInfo);

tsem_wait(&pInfo->sem);
code = pInfo->code;
code = asyncCommitOffset(tmq, tname, vgId, &offsetVal, commitCallBackFn, pInfo);
if(code == 0){
tsem_wait(&pInfo->sem);
code = pInfo->code;
}

if(code == TSDB_CODE_TMQ_SAME_COMMITTED_VALUE) code = TSDB_CODE_SUCCESS;
tsem_destroy(&pInfo->sem);
taosMemoryFree(pInfo);

tscInfo("consumer:0x%" PRIx64 " sync send seek to vgId:%d, offset:%" PRId64" code:%s", tmq->consumerId, vgId, offset, tstrerror(code));
tscInfo("consumer:0x%" PRIx64 " sync send commit to vgId:%d, offset:%" PRId64" code:%s", tmq->consumerId, vgId, offset, tstrerror(code));

return code;
}
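
The change above also reaches the public commit helpers: when the requested offset equals the one already committed, the internal code TSDB_CODE_TMQ_SAME_COMMITTED_VALUE is mapped back to success before it reaches the caller. The sketch below is a hedged usage example of `tmq_commit_offset_sync` and `tmq_committed`; it assumes an already-connected consumer subscribed to `topicName` and relies on the assignment API that also appears in the test case later in this diff.

```c
// Sketch: commit the current position of the first assigned vgroup for one topic.
#include <stdio.h>
#include <inttypes.h>
#include "taos.h"

void commit_first_vgroup(tmq_t *tmq, const char *topicName) {
  tmq_topic_assignment *pAssign = NULL;
  int32_t numOfAssign = 0;

  if (tmq_get_topic_assignment(tmq, topicName, &pAssign, &numOfAssign) != 0 || numOfAssign == 0) {
    return;  // nothing assigned to this consumer yet
  }

  int32_t vgId = pAssign[0].vgId;
  int64_t offset = pAssign[0].currentOffset;

  // Re-committing an already-committed offset now reports success
  // (TSDB_CODE_TMQ_SAME_COMMITTED_VALUE is translated to 0 inside the client).
  int32_t code = tmq_commit_offset_sync(tmq, topicName, vgId, offset);
  printf("commit vgId:%d offset:%" PRId64 " code:%d committed:%" PRId64 "\n",
         vgId, offset, code, tmq_committed(tmq, topicName, vgId));

  tmq_free_assignment(pAssign);
}
```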

@@ -2451,10 +2460,11 @@ void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, i

code = asyncCommitOffset(tmq, tname, vgId, &offsetVal, cb, param);

tscInfo("consumer:0x%" PRIx64 " async send seek to vgId:%d, offset:%" PRId64" code:%s", tmq->consumerId, vgId, offset, tstrerror(code));
tscInfo("consumer:0x%" PRIx64 " async send commit to vgId:%d, offset:%" PRId64" code:%s", tmq->consumerId, vgId, offset, tstrerror(code));

end:
if(code != 0 && cb != NULL){
if(code == TSDB_CODE_TMQ_SAME_COMMITTED_VALUE) code = TSDB_CODE_SUCCESS;
cb(tmq, code, param);
}
}

@@ -1251,7 +1251,8 @@ TEST(clientCase, td_25129) {
}

for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
int64_t committed = tmq_committed(tmq, topicName, pAssign[i].vgId);
printf("assign i:%d, vgId:%d, committed:%lld, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, committed, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
}

while (1) {

@@ -105,6 +105,8 @@ char *tsClientCrashReportUri = "/ccrashreport";
char *tsSvrCrashReportUri = "/dcrashreport";

// schemaless
bool tsSmlDot2Underline = true;
char tsSmlTsDefaultName[TSDB_COL_NAME_LEN] = "_ts";
char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null";
char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table name can be specified in tag value.
// If set to empty system will generate table name using MD5 hash.

@@ -366,6 +368,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "keepColumnName", tsKeepColumnName, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddString(pCfg, "smlChildTableName", "", CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddString(pCfg, "smlTsDefaultName", tsSmlTsDefaultName, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddBool(pCfg, "smlDot2Underline", tsSmlDot2Underline, CFG_SCOPE_CLIENT) != 0) return -1;
// if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, CFG_SCOPE_CLIENT) != 0) return -1;
// if (cfgAddInt32(pCfg, "smlBatchSize", tsSmlBatchSize, 1, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1;
if (cfgAddInt32(pCfg, "maxInsertBatchRows", tsMaxInsertBatchRows, 1, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1;

@@ -801,6 +805,8 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {

tstrncpy(tsSmlChildTableName, cfgGetItem(pCfg, "smlChildTableName")->str, TSDB_TABLE_NAME_LEN);
tstrncpy(tsSmlTagName, cfgGetItem(pCfg, "smlTagName")->str, TSDB_COL_NAME_LEN);
tstrncpy(tsSmlTsDefaultName, cfgGetItem(pCfg, "smlTsDefaultName")->str, TSDB_COL_NAME_LEN);
tsSmlDot2Underline = cfgGetItem(pCfg, "smlDot2Underline")->bval;
// tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval;

// tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32;

@@ -1243,6 +1249,10 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) {
// tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval;
// } else if (strcasecmp("smlBatchSize", name) == 0) {
// tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32;
} else if(strcasecmp("smlTsDefaultName", name) == 0) {
tstrncpy(tsSmlTsDefaultName, cfgGetItem(pCfg, "smlTsDefaultName")->str, TSDB_COL_NAME_LEN);
} else if(strcasecmp("smlDot2Underline", name) == 0) {
tsSmlDot2Underline = cfgGetItem(pCfg, "smlDot2Underline")->bval;
} else if (strcasecmp("shellActivityTimer", name) == 0) {
tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32;
} else if (strcasecmp("supportVnodes", name) == 0) {

@@ -7317,6 +7317,9 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts;
} else if (pLeft->type == TMQ_OFFSET__SNAPSHOT_META) {
return pLeft->uid == pRight->uid;
} else {
uError("offset type:%d", pLeft->type);
ASSERT(0);
}
}
return false;

@@ -42,7 +42,7 @@

static SDnode globalDnode = {0};
static const char *dmOS[10] = {"Ubuntu", "CentOS Linux", "Red Hat", "Debian GNU", "CoreOS",
"FreeBSD", "openSUSE", "SLES", "Fedora", "MacOS"};
"FreeBSD", "openSUSE", "SLES", "Fedora", "macOS"};

SDnode *dmInstance() { return &globalDnode; }

@@ -94,7 +94,7 @@ void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId){

bool mndRebTryStart() {
int32_t old = atomic_val_compare_exchange_32(&mqRebInExecCnt, 0, 1);
mInfo("tq timer, rebalance counter old val:%d", old);
mDebug("tq timer, rebalance counter old val:%d", old);
return old == 0;
}

@@ -116,7 +116,7 @@ void mndRebCntDec() {
int32_t newVal = val - 1;
int32_t oldVal = atomic_val_compare_exchange_32(&mqRebInExecCnt, val, newVal);
if (oldVal == val) {
mInfo("rebalance trans end, rebalance counter:%d", newVal);
mDebug("rebalance trans end, rebalance counter:%d", newVal);
break;
}
}

@@ -420,6 +420,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {

SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, data->topicName);
if(pSub == NULL){
ASSERT(0);
continue;
}
taosWLockLatch(&pSub->lock);

@@ -515,7 +516,10 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
char *topic = taosArrayGetP(pConsumer->currentTopics, i);
SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, topic);
// txn guarantees pSub is created
if(pSub == NULL) continue;
if(pSub == NULL) {
ASSERT(0);
continue;
}
taosRLockLatch(&pSub->lock);

SMqSubTopicEp topicEp = {0};

@@ -524,6 +528,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
// 2.1 fetch topic schema
SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
if(pTopic == NULL) {
ASSERT(0);
taosRUnLockLatch(&pSub->lock);
mndReleaseSubscribe(pMnode, pSub);
continue;

@@ -898,7 +903,7 @@ static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) {
mInfo("consumer:0x%" PRIx64 " sub insert, cgroup:%s status:%d(%s) epoch:%d",
pConsumer->consumerId, pConsumer->cgroup, pConsumer->status, mndConsumerStatusName(pConsumer->status),
pConsumer->epoch);
pConsumer->subscribeTime = taosGetTimestampMs();
pConsumer->subscribeTime = pConsumer->createTime;
return 0;
}

@@ -20,6 +20,7 @@

#define SHOW_STEP_SIZE 100
#define SHOW_COLS_STEP_SIZE 4096
#define SHOW_PRIVILEGES_STEP_SIZE 2048

static SShowObj *mndCreateShowObj(SMnode *pMnode, SRetrieveTableReq *pReq);
static void mndFreeShowObj(SShowObj *pShow);

@@ -234,6 +235,8 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) {

if(pShow->type == TSDB_MGMT_TABLE_COL){ // expend capacity for ins_columns
rowsToRead = SHOW_COLS_STEP_SIZE;
} else if (pShow->type == TSDB_MGMT_TABLE_PRIVILEGES) {
rowsToRead = SHOW_PRIVILEGES_STEP_SIZE;
}
ShowRetrieveFp retrieveFp = pMgmt->retrieveFps[pShow->type];
if (retrieveFp == NULL) {

@@ -692,6 +692,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
taosArrayDestroy(rebOutput.modifyConsumers);
taosArrayDestroy(rebOutput.rebVgs);

taosHashCancelIterate(pReq->rebSubHash, pIter);
terrno = TSDB_CODE_OUT_OF_MEMORY;
mInfo("mq re-balance failed, due to out of memory");
taosHashCleanup(pReq->rebSubHash);

@@ -1168,7 +1169,7 @@ static int32_t buildResult(SSDataBlock *pBlock, int32_t* numOfRows, int64_t cons
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, *numOfRows, (const char *)consumerIdHex, consumerId == -1);

mDebug("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic),
mInfo("mnd show subscriptions: topic %s, consumer:0x%" PRIx64 " cgroup %s vgid %d", varDataVal(topic),
consumerId, varDataVal(cgroup), pVgEp->vgId);

// offset

@@ -1174,26 +1174,30 @@ static void mndLoopHash(SHashObj *hash, char *priType, SSDataBlock *pBlock, int3
if (strcmp("t", value) != 0) {
SNode *pAst = NULL;
int32_t sqlLen = 0;
char sql[TSDB_EXPLAIN_RESULT_ROW_SIZE] = {0};
size_t bufSz = strlen(value) + 1;
char* sql = taosMemoryMalloc(bufSz + 1);
char* obj = taosMemoryMalloc(TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE);

if (nodesStringToNode(value, &pAst) == 0) {
nodesNodeToSQL(pAst, sql, TSDB_EXPLAIN_RESULT_ROW_SIZE, &sqlLen);
if (sql != NULL && obj != NULL && nodesStringToNode(value, &pAst) == 0) {
nodesNodeToSQL(pAst, sql, bufSz, &sqlLen);
nodesDestroyNode(pAst);
} else {
sqlLen = 5;
sprintf(sql, "error");
}

char obj[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(obj, sql, pShow->pMeta->pSchemas[cols].bytes);

pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, *numOfRows, (const char *)obj, false);
taosMemoryFree(obj);
taosMemoryFree(sql);
} else {
char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
char* condition = taosMemoryMalloc(TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE);
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, *numOfRows, (const char *)condition, false);
taosMemoryFree(condition);
}

(*numOfRows)++;

@@ -1209,16 +1213,34 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
int32_t cols = 0;
char *pWrite;

bool fetchNextUser = pShow->restore ? false : true;
pShow->restore = false;

while (numOfRows < rows) {
pShow->pIter = sdbFetch(pSdb, SDB_USER, pShow->pIter, (void **)&pUser);
if (pShow->pIter == NULL) break;
if (fetchNextUser) {
pShow->pIter = sdbFetch(pSdb, SDB_USER, pShow->pIter, (void **)&pUser);
if (pShow->pIter == NULL) break;
} else {
fetchNextUser = true;
void *pKey = taosHashGetKey(pShow->pIter, NULL);
pUser = sdbAcquire(pSdb, SDB_USER, pKey);
if (!pUser) {
continue;
}
}

int32_t numOfReadDbs = taosHashGetSize(pUser->readDbs);
int32_t numOfWriteDbs = taosHashGetSize(pUser->writeDbs);
int32_t numOfTopics = taosHashGetSize(pUser->topics);
int32_t numOfReadTbs = taosHashGetSize(pUser->readTbs);
int32_t numOfWriteTbs = taosHashGetSize(pUser->writeTbs);
if (numOfRows + numOfReadDbs + numOfWriteDbs + numOfTopics + numOfReadTbs + numOfWriteTbs >= rows) break;
if (numOfRows + numOfReadDbs + numOfWriteDbs + numOfTopics + numOfReadTbs + numOfWriteTbs >= rows) {
mInfo("will restore. current num of rows: %d, read dbs %d, write dbs %d, topics %d, read tables %d, write tables %d",
numOfRows, numOfReadDbs, numOfWriteDbs, numOfTopics, numOfReadTbs, numOfWriteTbs);
pShow->restore = true;
sdbRelease(pSdb, pUser);
break;
}

if (pUser->superUser) {
cols = 0;

@@ -1242,10 +1264,11 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false);

char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
char* condition = taosMemoryMalloc(TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE);
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)condition, false);
taosMemoryFree(condition);

numOfRows++;
}

@@ -1276,10 +1299,11 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false);

char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
char* condition = taosMemoryMalloc(TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE);
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)condition, false);
taosMemoryFree(condition);

numOfRows++;
db = taosHashIterate(pUser->readDbs, db);

@@ -1311,10 +1335,11 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false);

char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
char* condition = taosMemoryMalloc(TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE);
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)condition, false);
taosMemoryFree(condition);

numOfRows++;
db = taosHashIterate(pUser->writeDbs, db);

@@ -1348,10 +1373,11 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false);

char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
char* condition = taosMemoryMalloc(TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE);
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)condition, false);
taosMemoryFree(condition);

numOfRows++;
topic = taosHashIterate(pUser->topics, topic);

@@ -346,9 +346,9 @@ int32_t sndProcessStreamTaskCheckReq(SSnode *pSnode, SRpcMsg *pMsg) {
rsp.status = streamTaskCheckStatus(pTask);
streamMetaReleaseTask(pSnode->pMeta, pTask);

qDebug("s-task:%s recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), status:%s, rsp status %d",
pTask->id.idStr, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId,
streamGetTaskStatusStr(pTask->status.taskStatus), rsp.status);
const char* pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
qDebug("s-task:%s status:%s, recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), ready:%d",
pTask->id.idStr, pStatus, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
} else {
rsp.status = 0;
qDebug("tq recv task check(taskId:0x%x not built yet) req(reqId:0x%" PRIx64

@@ -78,6 +78,8 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->updateInfoIsUpdated = updateInfoIsUpdated;
pStore->updateInfoIsTableInserted = updateInfoIsTableInserted;
pStore->updateInfoDestroy = updateInfoDestroy;
pStore->windowSBfDelete = windowSBfDelete;
pStore->windowSBfAdd = windowSBfAdd;

pStore->updateInfoInitP = updateInfoInitP;
pStore->updateInfoAddCloseWindowSBF = updateInfoAddCloseWindowSBF;

@@ -841,48 +841,6 @@ typedef enum {
READ_MODE_ALL,
} EReadMode;

typedef struct STsdbReaderInfo {
uint64_t suid;
STSchema *pSchema;
EReadMode readMode;
uint64_t rowsNum;
STimeWindow window;
SVersionRange verRange;
int16_t order;
} STsdbReaderInfo;

typedef struct {
SArray *pTombData;
} STableLoadInfo;

struct SDataFileReader;

typedef struct SCacheRowsReader {
STsdb *pTsdb;
STsdbReaderInfo info;
TdThreadMutex readerMutex;
SVnode *pVnode;
STSchema *pSchema;
STSchema *pCurrSchema;
uint64_t uid;
char **transferBuf; // todo remove it soon
int32_t numOfCols;
SArray *pCidList;
int32_t *pSlotIds;
int32_t type;
int32_t tableIndex; // currently returned result tables
STableKeyInfo *pTableList; // table id list
int32_t numOfTables;
uint64_t *uidList;
SSHashObj *pTableMap;
SArray *pLDataIterArray;
struct SDataFileReader *pFileReader;
STFileSet *pCurFileSet;
STsdbReadSnap *pReadSnap;
char *idstr;
int64_t lastTs;
} SCacheRowsReader;

typedef struct {
TSKEY ts;
int8_t dirty;

@@ -892,14 +850,10 @@ typedef struct {
int32_t tsdbOpenCache(STsdb *pTsdb);
void tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *row);
int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype);
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype);
int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey);

int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb *pTsdb);
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TSDBROW *row, bool dup);
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h);
int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h);
int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h);

int32_t tsdbCacheGetBlockIdx(SLRUCache *pCache, SDataFReader *pFileReader, LRUHandle **handle);

@@ -909,8 +863,6 @@ int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);

// int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema);

// ========== inline functions ==========
static FORCE_INLINE int32_t tsdbKeyCmprFn(const void *p1, const void *p2) {
TSDBKEY *pKey1 = (TSDBKEY *)p1;

@@ -146,6 +146,20 @@ void tqClose(STQ* pTq) {
return;
}

void* pIter = taosHashIterate(pTq->pPushMgr, NULL);
while (pIter) {
STqHandle* pHandle = *(STqHandle**)pIter;
int32_t vgId = TD_VID(pTq->pVnode);

if(pHandle->msg != NULL) {
tqPushEmptyDataRsp(pHandle, vgId);
rpcFreeCont(pHandle->msg->pCont);
taosMemoryFree(pHandle->msg);
pHandle->msg = NULL;
}
pIter = taosHashIterate(pTq->pPushMgr, pIter);
}

tqOffsetClose(pTq->pOffsetStore);
taosHashCleanup(pTq->pHandle);
taosHashCleanup(pTq->pPushMgr);

@@ -278,6 +292,10 @@ int32_t tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) {
tqInitDataRsp(&dataRsp, &req);
dataRsp.blockNum = 0;
dataRsp.rspOffset = dataRsp.reqOffset;
char buf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.reqOffset);
tqInfo("tqPushEmptyDataRsp to consumer:0x%"PRIx64 " vgId:%d, offset:%s, reqId:0x%" PRIx64, req.consumerId, vgId, buf, req.reqId);

tqSendDataRsp(pHandle, pHandle->msg, &req, &dataRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
tDeleteMqDataRsp(&dataRsp);
return 0;

@@ -336,10 +354,10 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t sversion, char* msg, int32_t
STqOffset* pOffset = &vgOffset.offset;

if (pOffset->val.type == TMQ_OFFSET__SNAPSHOT_DATA || pOffset->val.type == TMQ_OFFSET__SNAPSHOT_META) {
tqInfo("receive offset commit msg to %s on vgId:%d, offset(type:snapshot) uid:%" PRId64 ", ts:%" PRId64,
tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:snapshot) uid:%" PRId64 ", ts:%" PRId64,
pOffset->subKey, vgId, pOffset->val.uid, pOffset->val.ts);
} else if (pOffset->val.type == TMQ_OFFSET__LOG) {
tqInfo("receive offset commit msg to %s on vgId:%d, offset(type:log) version:%" PRId64, pOffset->subKey, vgId,
tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:log) version:%" PRId64, pOffset->subKey, vgId,
pOffset->val.version);
} else {
tqError("invalid commit offset type:%d", pOffset->val.type);

@@ -367,12 +385,13 @@ int32_t tqProcessSeekReq(STQ* pTq, SRpcMsg* pMsg) {
SRpcMsg rsp = {.info = pMsg->info};
int code = 0;

tqDebug("tmq seek: consumer:0x%" PRIx64 " vgId:%d, subkey %s", req.consumerId, vgId, req.subKey);
if (tDeserializeSMqSeekReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}

tqDebug("tmq seek: consumer:0x%" PRIx64 " vgId:%d, subkey %s", req.consumerId, vgId, req.subKey);

STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
if (pHandle == NULL) {
tqWarn("tmq seek: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", req.consumerId, vgId, req.subKey);

@@ -515,10 +534,11 @@ int32_t tqProcessPollPush(STQ* pTq, SRpcMsg* pMsg) {

while (pIter) {
STqHandle* pHandle = *(STqHandle**)pIter;
tqDebug("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId);
tqInfo("vgId:%d start set submit for pHandle:%p, consumer:0x%" PRIx64, vgId, pHandle, pHandle->consumerId);

if (ASSERT(pHandle->msg != NULL)) {
tqError("pHandle->msg should not be null");
taosHashCancelIterate(pTq->pPushMgr, pIter);
break;
}else{
SRpcMsg msg = {.msgType = TDMT_VND_TMQ_CONSUME, .pCont = pHandle->msg->pCont, .contLen = pHandle->msg->contLen, .info = pHandle->msg->info};

@@ -556,10 +576,18 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
// 1. find handle
pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
if (pHandle == NULL) {
tqError("tmq poll: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", consumerId, vgId, req.subKey);
terrno = TSDB_CODE_INVALID_MSG;
taosWUnLockLatch(&pTq->lock);
return -1;
do{
if (tqMetaGetHandle(pTq, req.subKey) == 0){
pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
if(pHandle != NULL){
break;
}
}
tqError("tmq poll: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", consumerId, vgId, req.subKey);
terrno = TSDB_CODE_INVALID_MSG;
taosWUnLockLatch(&pTq->lock);
return -1;
}while(0);
}

// 2. check re-balance status

@@ -849,30 +877,28 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
|
|||
taosWLockLatch(&pTq->lock);
|
||||
|
||||
if (pHandle->consumerId == req.newConsumerId) { // do nothing
|
||||
tqInfo("vgId:%d consumer:0x%" PRIx64 " remains, no switch occurs, should not reach here", req.vgId,
|
||||
req.newConsumerId);
|
||||
tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains, because redo wal log", req.vgId, req.newConsumerId);
|
||||
} else {
|
||||
tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId,
|
||||
req.newConsumerId);
|
||||
tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId, req.newConsumerId);
|
||||
atomic_store_64(&pHandle->consumerId, req.newConsumerId);
|
||||
// atomic_add_fetch_32(&pHandle->epoch, 1);
|
||||
|
||||
// kill executing task
|
||||
// if(tqIsHandleExec(pHandle)) {
|
||||
// qTaskInfo_t pTaskInfo = pHandle->execHandle.task;
|
||||
// if (pTaskInfo != NULL) {
|
||||
// qKillTask(pTaskInfo, TSDB_CODE_SUCCESS);
|
||||
// }
|
||||
|
||||
// if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
// qStreamCloseTsdbReader(pTaskInfo);
|
||||
// }
|
||||
// }
|
||||
// remove if it has been register in the push manager, and return one empty block to consumer
|
||||
tqUnregisterPushHandle(pTq, pHandle);
|
||||
ret = tqMetaSaveHandle(pTq, req.subKey, pHandle);
|
||||
}
|
||||
// atomic_add_fetch_32(&pHandle->epoch, 1);
|
||||
|
||||
// kill executing task
|
||||
// if(tqIsHandleExec(pHandle)) {
|
||||
// qTaskInfo_t pTaskInfo = pHandle->execHandle.task;
|
||||
// if (pTaskInfo != NULL) {
|
||||
// qKillTask(pTaskInfo, TSDB_CODE_SUCCESS);
|
||||
// }
|
||||
|
||||
// if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
// qStreamCloseTsdbReader(pTaskInfo);
|
||||
// }
|
||||
// }
|
||||
// remove if it has been register in the push manager, and return one empty block to consumer
|
||||
tqUnregisterPushHandle(pTq, pHandle);
|
||||
taosWUnLockLatch(&pTq->lock);
|
||||
ret = tqMetaSaveHandle(pTq, req.subKey, pHandle);
|
||||
}
|
||||
|
||||
end:
|
||||
|
@ -1041,9 +1067,9 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
|
|||
rsp.status = streamTaskCheckStatus(pTask);
|
||||
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
|
||||
|
||||
tqDebug("s-task:%s recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), status:%s, rsp status %d",
|
||||
pTask->id.idStr, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId,
|
||||
streamGetTaskStatusStr(pTask->status.taskStatus), rsp.status);
|
||||
const char* pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
|
||||
tqDebug("s-task:%s status:%s, recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), ready:%d",
|
||||
pTask->id.idStr, pStatus, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
|
||||
} else {
|
||||
rsp.status = 0;
|
||||
tqDebug("tq recv task check(taskId:0x%x not built yet) req(reqId:0x%" PRIx64 ") from task:0x%x (vgId:%d), rsp status %d",
|
||||
|
@ -1145,7 +1171,6 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
|
|||
// 3. It's an fill history task, do nothing. wait for the main task to start it
|
||||
SStreamTask* p = streamMetaAcquireTask(pStreamMeta, taskId);
|
||||
if (p != NULL) { // reset the downstreamReady flag.
|
||||
p->status.downstreamReady = 0;
|
||||
streamTaskCheckDownstreamTasks(p);
|
||||
}
|
||||
|
||||
|
@ -1154,12 +1179,10 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
|
|||
}
|
||||
|
||||
int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
char* msg = pMsg->pCont;
|
||||
|
||||
SStreamScanHistoryReq* pReq = (SStreamScanHistoryReq*)pMsg->pCont;
|
||||
SStreamMeta* pMeta = pTq->pStreamMeta;
|
||||
SStreamScanHistoryReq* pReq = (SStreamScanHistoryReq*)msg;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->taskId);
|
||||
if (pTask == NULL) {
|
||||
tqError("vgId:%d failed to acquire stream task:0x%x during stream recover, task may have been destroyed",
|
||||
|
@ -1167,12 +1190,20 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
// do recovery step 1
|
||||
// do recovery step1
|
||||
const char* id = pTask->id.idStr;
|
||||
const char* pStatus = streamGetTaskStatusStr(pTask->status.taskStatus);
|
||||
tqDebug("s-task:%s start history data scan stage(step 1), status:%s", id, pStatus);
|
||||
tqDebug("s-task:%s start scan-history stage(step 1), status:%s", id, pStatus);
|
||||
|
||||
int64_t st = taosGetTimestampMs();
|
||||
if (pTask->tsInfo.step1Start == 0) {
|
||||
ASSERT(pTask->status.pauseAllowed == false);
|
||||
pTask->tsInfo.step1Start = taosGetTimestampMs();
|
||||
if (pTask->info.fillHistory == 1) {
|
||||
streamTaskEnablePause(pTask);
|
||||
}
|
||||
} else {
|
||||
tqDebug("s-task:%s resume from paused, start ts:%"PRId64, pTask->id.idStr, pTask->tsInfo.step1Start);
|
||||
}
|
||||
|
||||
// we have to continue retrying to successfully execute the scan history task.
|
||||
int8_t schedStatus = atomic_val_compare_exchange_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE,
|
||||
|
@ -1185,31 +1216,21 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
ASSERT(pTask->status.pauseAllowed == false);
|
||||
|
||||
if (pTask->info.fillHistory == 1) {
|
||||
streamTaskEnablePause(pTask);
|
||||
ASSERT(pTask->status.pauseAllowed == true);
|
||||
}
|
||||
|
||||
if (!streamTaskRecoverScanStep1Finished(pTask)) {
|
||||
streamSourceScanHistoryData(pTask);
|
||||
}
|
||||
|
||||
// disable the pause when handling the step2 scan of tsdb data.
|
||||
// the whole next procedure cann't be stopped.
|
||||
// todo fix it: the following procedure should be executed completed and then shutdown when trying to close vnode.
|
||||
if (pTask->info.fillHistory == 1) {
|
||||
streamTaskDisablePause(pTask);
|
||||
}
|
||||
|
||||
if (streamTaskShouldStop(&pTask->status) || streamTaskShouldPause(&pTask->status)) {
|
||||
tqDebug("s-task:%s is dropped or paused, abort recover in step1", id);
|
||||
streamSourceScanHistoryData(pTask);
|
||||
if (pTask->status.taskStatus == TASK_STATUS__PAUSE) {
|
||||
double el = (taosGetTimestampMs() - pTask->tsInfo.step1Start) / 1000.0;
|
||||
tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs, sched-status:%d", pTask->id.idStr, el,
|
||||
TASK_SCHED_STATUS__INACTIVE);
|
||||
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
double el = (taosGetTimestampMs() - st) / 1000.0;
|
||||
// the following procedure should be executed, no matter status is stop/pause or not
|
||||
double el = (taosGetTimestampMs() - pTask->tsInfo.step1Start) / 1000.0;
|
||||
tqDebug("s-task:%s scan-history stage(step 1) ended, elapsed time:%.2fs", id, el);
|
||||
|
||||
if (pTask->info.fillHistory) {
|
||||
|
@ -1217,77 +1238,71 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
|||
SStreamTask* pStreamTask = NULL;
|
||||
bool done = false;
|
||||
|
||||
if (!pReq->igUntreated && !streamTaskRecoverScanStep1Finished(pTask)) {
|
||||
// 1. stop the related stream task, get the current scan wal version of stream task, ver.
|
||||
pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.taskId);
|
||||
if (pStreamTask == NULL) {
|
||||
qError("failed to find s-task:0x%x, it may have been destroyed, drop fill-history task:%s",
|
||||
pTask->streamTaskId.taskId, pTask->id.idStr);
|
||||
// 1. get the related stream task
|
||||
pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.taskId);
|
||||
if (pStreamTask == NULL) {
|
||||
// todo delete this task, if the related stream task is dropped
|
||||
qError("failed to find s-task:0x%x, it may have been destroyed, drop fill-history task:%s",
|
||||
pTask->streamTaskId.taskId, pTask->id.idStr);
|
||||
|
||||
pTask->status.taskStatus = TASK_STATUS__DROPPING;
|
||||
tqDebug("s-task:%s fill-history task set status to be dropping", id);
|
||||
tqDebug("s-task:%s fill-history task set status to be dropping", id);
|
||||
|
||||
streamMetaSaveTask(pMeta, pTask);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ASSERT(pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE);
|
||||
|
||||
// stream task in TASK_STATUS__SCAN_HISTORY can not be paused.
|
||||
// wait for the stream task get ready for scan history data
|
||||
while (pStreamTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY) {
|
||||
tqDebug(
|
||||
"s-task:%s level:%d related stream task:%s(status:%s) not ready for halt, wait for it and recheck in 100ms",
|
||||
id, pTask->info.taskLevel, pStreamTask->id.idStr, streamGetTaskStatusStr(pStreamTask->status.taskStatus));
|
||||
taosMsleep(100);
|
||||
}
|
||||
|
||||
// now we can stop the stream task execution
|
||||
streamTaskHalt(pStreamTask);
|
||||
tqDebug("s-task:%s level:%d sched-status:%d is halt by fill-history task:%s", pStreamTask->id.idStr,
|
||||
pStreamTask->info.taskLevel, pStreamTask->status.schedStatus, id);
|
||||
|
||||
// if it's an source task, extract the last version in wal.
|
||||
pRange = &pTask->dataRange.range;
|
||||
int64_t latestVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
|
||||
done = streamHistoryTaskSetVerRangeStep2(pTask, latestVer);
|
||||
streamMetaUnregisterTask(pMeta, pTask->id.taskId);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ASSERT(pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE);
|
||||
|
||||
// 2. it cannot be paused, when the stream task in TASK_STATUS__SCAN_HISTORY status. Let's wait for the
|
||||
// stream task get ready for scan history data
|
||||
while (pStreamTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY) {
|
||||
tqDebug(
|
||||
"s-task:%s level:%d related stream task:%s(status:%s) not ready for halt, wait for it and recheck in 100ms",
|
||||
id, pTask->info.taskLevel, pStreamTask->id.idStr, streamGetTaskStatusStr(pStreamTask->status.taskStatus));
|
||||
taosMsleep(100);
|
||||
}
|
||||
|
||||
// now we can stop the stream task execution
|
||||
streamTaskHalt(pStreamTask);
|
||||
|
||||
tqDebug("s-task:%s level:%d sched-status:%d is halt by fill-history task:%s", pStreamTask->id.idStr,
|
||||
pStreamTask->info.taskLevel, pStreamTask->status.schedStatus, id);
|
||||
|
||||
// if it's an source task, extract the last version in wal.
|
||||
pRange = &pTask->dataRange.range;
|
||||
int64_t latestVer = walReaderGetCurrentVer(pStreamTask->exec.pWalReader);
|
||||
done = streamHistoryTaskSetVerRangeStep2(pTask, latestVer);
|
||||
|
||||
if (done) {
|
||||
pTask->tsInfo.step2Start = taosGetTimestampMs();
|
||||
streamTaskEndScanWAL(pTask);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
} else {
|
||||
if (!streamTaskRecoverScanStep1Finished(pTask)) {
|
||||
STimeWindow* pWindow = &pTask->dataRange.window;
|
||||
tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64
|
||||
", do secondary scan-history from WAL after halt the related stream task:%s",
|
||||
id, pTask->info.taskLevel, pRange->minVer, pRange->maxVer, pWindow->skey, pWindow->ekey, id);
|
||||
ASSERT(pTask->status.schedStatus == TASK_SCHED_STATUS__WAITING);
|
||||
STimeWindow* pWindow = &pTask->dataRange.window;
|
||||
tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64
|
||||
", do secondary scan-history from WAL after halt the related stream task:%s",
|
||||
id, pTask->info.taskLevel, pRange->minVer, pRange->maxVer, pWindow->skey, pWindow->ekey,
|
||||
pStreamTask->id.idStr);
|
||||
ASSERT(pTask->status.schedStatus == TASK_SCHED_STATUS__WAITING);
|
||||
|
||||
pTask->tsInfo.step2Start = taosGetTimestampMs();
|
||||
streamSetParamForStreamScannerStep2(pTask, pRange, pWindow);
|
||||
}
|
||||
pTask->tsInfo.step2Start = taosGetTimestampMs();
|
||||
streamSetParamForStreamScannerStep2(pTask, pRange, pWindow);
|
||||
|
||||
if (!streamTaskRecoverScanStep2Finished(pTask)) {
|
||||
pTask->status.taskStatus = TASK_STATUS__SCAN_HISTORY_WAL;
|
||||
if (streamTaskShouldStop(&pTask->status) || streamTaskShouldPause(&pTask->status)) {
|
||||
tqDebug("s-task:%s is dropped or paused, abort recover in step1", id);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return 0;
|
||||
}
|
||||
int64_t dstVer = pTask->dataRange.range.minVer - 1;
|
||||
|
||||
int64_t dstVer = pTask->dataRange.range.minVer - 1;
|
||||
|
||||
pTask->chkInfo.currentVer = dstVer;
|
||||
walReaderSetSkipToVersion(pTask->exec.pWalReader, dstVer);
|
||||
tqDebug("s-task:%s wal reader start scan from WAL ver:%" PRId64 ", set sched-status:%d", id, dstVer,
|
||||
TASK_SCHED_STATUS__INACTIVE);
|
||||
}
|
||||
pTask->chkInfo.currentVer = dstVer;
|
||||
walReaderSetSkipToVersion(pTask->exec.pWalReader, dstVer);
|
||||
tqDebug("s-task:%s wal reader start scan WAL verRange:%" PRId64 "-%" PRId64 ", set sched-status:%d", id, dstVer,
|
||||
pTask->dataRange.range.maxVer, TASK_SCHED_STATUS__INACTIVE);
|
||||
|
||||
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
|
||||
|
||||
// set the fill-history task to be normal
|
||||
if (pTask->info.fillHistory == 1) {
|
||||
streamSetStatusNormal(pTask);
|
||||
}
|
||||
|
||||
// 4. 1) transfer the ownership of executor state, 2) update the scan data range for source task.
|
||||
// 5. resume the related stream task.
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
|
@ -1304,10 +1319,10 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
|||
if (pTask->historyTaskId.taskId == 0) {
|
||||
*pWindow = (STimeWindow){INT64_MIN, INT64_MAX};
|
||||
tqDebug(
|
||||
"s-task:%s scanhistory in stream time window completed, no related fill-history task, reset the time "
|
||||
"s-task:%s scan-history in stream time window completed, no related fill-history task, reset the time "
|
||||
"window:%" PRId64 " - %" PRId64,
|
||||
id, pWindow->skey, pWindow->ekey);
|
||||
qResetStreamInfoTimeWindow(pTask->exec.pExecutor);
|
||||
qStreamInfoResetTimewindowFilter(pTask->exec.pExecutor);
|
||||
} else {
|
||||
// when related fill-history task exists, update the fill-history time window only when the
|
||||
// state transfer is completed.
|
||||
|
@ -1500,7 +1515,7 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
|
|||
if (pTask != NULL) {
|
||||
// even in halt status, the data in inputQ must be processed
|
||||
int8_t st = pTask->status.taskStatus;
|
||||
if (st == TASK_STATUS__NORMAL || st == TASK_STATUS__SCAN_HISTORY || st == TASK_STATUS__SCAN_HISTORY_WAL) {
|
||||
if (st == TASK_STATUS__NORMAL || st == TASK_STATUS__SCAN_HISTORY/* || st == TASK_STATUS__SCAN_HISTORY_WAL*/) {
|
||||
tqDebug("vgId:%d s-task:%s start to process block from inputQ, last chk point:%" PRId64, vgId, pTask->id.idStr,
|
||||
pTask->chkInfo.version);
|
||||
streamProcessRunReq(pTask);
|
||||
|
@ -1578,9 +1593,8 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
|
|||
SStreamMeta* pMeta = pTq->pStreamMeta;
|
||||
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->taskId);
|
||||
if (pTask == NULL) {
|
||||
tqError("vgId:%d failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId,
|
||||
tqError("vgId:%d process pause req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId,
|
||||
pReq->taskId);
|
||||
|
||||
// since task is in [STOP|DROPPING] state, it is safe to assume the pause is active
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -1592,9 +1606,8 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
|
|||
if (pTask->historyTaskId.taskId != 0) {
|
||||
pHistoryTask = streamMetaAcquireTask(pMeta, pTask->historyTaskId.taskId);
|
||||
if (pHistoryTask == NULL) {
|
||||
tqError("vgId:%d failed to acquire fill-history task:0x%x, it may have been dropped already. Pause success",
|
||||
tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%x, it may have been dropped already",
|
||||
pMeta->vgId, pTask->historyTaskId.taskId);
|
||||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
|
||||
// since task is in [STOP|DROPPING] state, it is safe to assume the pause is active
|
||||
|
@ -1602,14 +1615,12 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
|
|||
}
|
||||
|
||||
tqDebug("s-task:%s fill-history task handle paused along with related stream task", pHistoryTask->id.idStr);
|
||||
streamTaskPause(pHistoryTask);
|
||||
}
|
||||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
if (pHistoryTask != NULL) {
|
||||
streamTaskPause(pHistoryTask);
|
||||
streamMetaReleaseTask(pMeta, pHistoryTask);
|
||||
}
|
||||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1637,8 +1648,8 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion,
|
|||
vgId, pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
|
||||
}
|
||||
|
||||
if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory) {
|
||||
streamStartRecoverTask(pTask, igUntreated);
|
||||
if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory && pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY) {
|
||||
streamStartScanHistoryAsync(pTask, igUntreated);
|
||||
} else if (level == TASK_LEVEL__SOURCE && (taosQueueItemSize(pTask->inputQueue->queue) == 0)) {
|
||||
tqStartStreamTasks(pTq);
|
||||
} else {
|
||||
|
|
|
@ -338,7 +338,7 @@ static int buildHandle(STQ* pTq, STqHandle* handle){
|
|||
taosArrayDestroy(tbUidList);
|
||||
return -1;
|
||||
}
|
||||
tqDebug("vgId:%d, tq try to get ctb for stb subscribe, suid:%" PRId64, pVnode->config.vgId, handle->execHandle.execTb.suid);
|
||||
tqInfo("vgId:%d, tq try to get ctb for stb subscribe, suid:%" PRId64, pVnode->config.vgId, handle->execHandle.execTb.suid);
|
||||
handle->execHandle.pTqReader = tqReaderOpen(pVnode);
|
||||
tqReaderSetTbUidList(handle->execHandle.pTqReader, tbUidList, NULL);
|
||||
taosArrayDestroy(tbUidList);
|
||||
|
@ -356,7 +356,7 @@ static int restoreHandle(STQ* pTq, void* pVal, int vLen, STqHandle* handle){
|
|||
if(buildHandle(pTq, handle) < 0){
|
||||
return -1;
|
||||
}
|
||||
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
|
||||
tqInfo("tq restore %s consumer %" PRId64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
|
||||
return taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
|
||||
}
|
||||
|
||||
|
@ -384,7 +384,7 @@ int32_t tqCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle){
|
|||
if(buildHandle(pTq, handle) < 0){
|
||||
return -1;
|
||||
}
|
||||
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
|
||||
tqInfo("tq restore %s consumer %" PRId64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
|
||||
return taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
|
||||
}
|
||||
|
||||
|
|
|
@ -78,12 +78,12 @@ int32_t tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg) {
|
|||
memcpy(pHandle->msg->pCont, pMsg->pCont, pMsg->contLen);
|
||||
pHandle->msg->contLen = pMsg->contLen;
|
||||
int32_t ret = taosHashPut(pTq->pPushMgr, pHandle->subKey, strlen(pHandle->subKey), &pHandle, POINTER_BYTES);
|
||||
tqDebug("vgId:%d data is over, ret:%d, consumerId:0x%" PRIx64 ", register to pHandle:%p, pCont:%p, len:%d", vgId, ret,
|
||||
tqInfo("vgId:%d data is over, ret:%d, consumerId:0x%" PRIx64 ", register to pHandle:%p, pCont:%p, len:%d", vgId, ret,
|
||||
pHandle->consumerId, pHandle, pHandle->msg->pCont, pHandle->msg->contLen);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tqUnregisterPushHandle(STQ* pTq, void *handle) {
|
||||
int tqUnregisterPushHandle(STQ* pTq, void *handle) {
|
||||
STqHandle *pHandle = (STqHandle*)handle;
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
|
||||
|
@ -91,7 +91,7 @@ int32_t tqUnregisterPushHandle(STQ* pTq, void *handle) {
|
|||
return 0;
|
||||
}
|
||||
int32_t ret = taosHashRemove(pTq->pPushMgr, pHandle->subKey, strlen(pHandle->subKey));
|
||||
tqDebug("vgId:%d remove pHandle:%p,ret:%d consumer Id:0x%" PRIx64, vgId, pHandle, ret, pHandle->consumerId);
|
||||
tqInfo("vgId:%d remove pHandle:%p,ret:%d consumer Id:0x%" PRIx64, vgId, pHandle, ret, pHandle->consumerId);
|
||||
|
||||
if(pHandle->msg != NULL) {
|
||||
// tqPushDataRsp(pHandle, vgId);
|
||||
|
|
|
@ -211,7 +211,7 @@ int32_t doSetOffsetForWalReader(SStreamTask *pTask, int32_t vgId) {
|
|||
|
||||
static void checkForFillHistoryVerRange(SStreamTask* pTask, int64_t ver) {
|
||||
if ((pTask->info.fillHistory == 1) && ver > pTask->dataRange.range.maxVer) {
|
||||
qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 "reach the maximum ver:%" PRId64
|
||||
qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64
|
||||
", not scan wal anymore, set the transfer state flag",
|
||||
pTask->id.idStr, ver, pTask->dataRange.range.maxVer);
|
||||
pTask->status.transferState = true;
|
||||
|
@ -251,19 +251,19 @@ int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
|
|||
int32_t status = pTask->status.taskStatus;
|
||||
|
||||
// non-source or fill-history tasks don't need to response the WAL scan action.
|
||||
if (pTask->info.taskLevel != TASK_LEVEL__SOURCE) {
|
||||
if ((pTask->info.taskLevel != TASK_LEVEL__SOURCE) || (pTask->status.downstreamReady == 0)) {
|
||||
streamMetaReleaseTask(pStreamMeta, pTask);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (status != TASK_STATUS__NORMAL && status != TASK_STATUS__SCAN_HISTORY_WAL) {
|
||||
if (status != TASK_STATUS__NORMAL) {
|
||||
tqDebug("s-task:%s not ready for new submit block from wal, status:%s", pTask->id.idStr, streamGetTaskStatusStr(status));
|
||||
streamMetaReleaseTask(pStreamMeta, pTask);
|
||||
continue;
|
||||
}
|
||||
|
||||
if ((pTask->info.fillHistory == 1) && pTask->status.transferState) {
|
||||
ASSERT(status == TASK_STATUS__SCAN_HISTORY_WAL);
|
||||
ASSERT(status == TASK_STATUS__NORMAL);
|
||||
// the maximum version of data in the WAL has reached already, the step2 is done
|
||||
tqDebug("s-task:%s fill-history reach the maximum ver:%" PRId64 ", not scan wal anymore", pTask->id.idStr,
|
||||
pTask->dataRange.range.maxVer);
|
||||
|
|
|
@ -317,6 +317,7 @@ int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequ
|
|||
// the offset value can not be monotonious increase??
|
||||
offset = reqOffset;
|
||||
} else {
|
||||
uError("req offset type is 0");
|
||||
return TSDB_CODE_TMQ_INVALID_MSG;
|
||||
}
|
||||
|
||||
|
|
|
@ -1020,63 +1020,15 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache
|
|||
code = tsdbCacheLoadFromRocks(pTsdb, uid, pLastArray, remainCols, pr, ltype);
|
||||
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
}
|
||||
|
||||
if (remainCols) {
|
||||
taosArrayDestroy(remainCols);
|
||||
if (remainCols) {
|
||||
taosArrayDestroy(remainCols);
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
/*
|
||||
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype) {
|
||||
int32_t code = 0;
|
||||
SLRUCache *pCache = pTsdb->lruCache;
|
||||
SArray *pCidList = pr->pCidList;
|
||||
int num_keys = TARRAY_SIZE(pCidList);
|
||||
|
||||
for (int i = 0; i < num_keys; ++i) {
|
||||
SLastCol *pLastCol = NULL;
|
||||
int16_t cid = *(int16_t *)taosArrayGet(pCidList, i);
|
||||
|
||||
SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
|
||||
LRUHandle *h = taosLRUCacheLookup(pCache, key, ROCKS_KEY_LEN);
|
||||
if (!h) {
|
||||
taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
h = taosLRUCacheLookup(pCache, key, ROCKS_KEY_LEN);
|
||||
if (!h) {
|
||||
pLastCol = tsdbCacheLoadCol(pTsdb, pr, pr->pSlotIds[i], uid, cid, ltype);
|
||||
|
||||
size_t charge = sizeof(*pLastCol);
|
||||
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
|
||||
charge += pLastCol->colVal.value.nData;
|
||||
}
|
||||
|
||||
LRUStatus status = taosLRUCacheInsert(pCache, key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, &h,
|
||||
TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState);
|
||||
if (status != TAOS_LRU_STATUS_OK) {
|
||||
code = -1;
|
||||
}
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
}
|
||||
|
||||
pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h);
|
||||
|
||||
SLastCol lastCol = *pLastCol;
|
||||
reallocVarData(&lastCol.colVal);
|
||||
|
||||
if (h) {
|
||||
taosLRUCacheRelease(pCache, h, false);
|
||||
}
|
||||
|
||||
taosArrayPush(pLastArray, &lastCol);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
*/
|
||||
int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey) {
|
||||
int32_t code = 0;
|
||||
// fetch schema
|
||||
|
@ -1108,6 +1060,7 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
|
|||
char **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
|
||||
size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
|
||||
char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *));
|
||||
taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
taosThreadMutexLock(&pTsdb->rCache.rMutex);
|
||||
rocksMayWrite(pTsdb, true, false, false);
|
||||
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
|
||||
|
@ -1137,7 +1090,7 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
|
|||
rocksdb_free(values_list[i]);
|
||||
rocksdb_free(values_list[i + num_keys]);
|
||||
|
||||
taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
// taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
|
||||
LRUHandle *h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[i], klen);
|
||||
if (h) {
|
||||
|
@ -1159,7 +1112,7 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
|
|||
}
|
||||
taosLRUCacheErase(pTsdb->lruCache, keys_list[num_keys + i], klen);
|
||||
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
// taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
}
|
||||
for (int i = 0; i < num_keys; ++i) {
|
||||
taosMemoryFree(keys_list[i]);
|
||||
|
@ -1171,6 +1124,8 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
|
|||
|
||||
rocksMayWrite(pTsdb, true, false, true);
|
||||
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
|
||||
_exit:
|
||||
taosMemoryFree(pTSchema);
|
||||
|
||||
|
@ -1311,62 +1266,7 @@ int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
|
|||
|
||||
return code;
|
||||
}
|
||||
/*
|
||||
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
|
||||
int32_t code = 0;
|
||||
char key[32] = {0};
|
||||
int keyLen = 0;
|
||||
|
||||
// getTableCacheKey(uid, "lr", key, &keyLen);
|
||||
getTableCacheKey(uid, 0, key, &keyLen);
|
||||
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
|
||||
if (h) {
|
||||
SArray *pLast = (SArray *)taosLRUCacheValue(pCache, h);
|
||||
bool invalidate = false;
|
||||
int16_t nCol = taosArrayGetSize(pLast);
|
||||
|
||||
for (int16_t iCol = 0; iCol < nCol; ++iCol) {
|
||||
SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
|
||||
if (eKey >= tTsVal->ts) {
|
||||
invalidate = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (invalidate) {
|
||||
taosLRUCacheRelease(pCache, h, true);
|
||||
} else {
|
||||
taosLRUCacheRelease(pCache, h, false);
|
||||
}
|
||||
}
|
||||
|
||||
// getTableCacheKey(uid, "l", key, &keyLen);
|
||||
getTableCacheKey(uid, 1, key, &keyLen);
|
||||
h = taosLRUCacheLookup(pCache, key, keyLen);
|
||||
if (h) {
|
||||
SArray *pLast = (SArray *)taosLRUCacheValue(pCache, h);
|
||||
bool invalidate = false;
|
||||
int16_t nCol = taosArrayGetSize(pLast);
|
||||
|
||||
for (int16_t iCol = 0; iCol < nCol; ++iCol) {
|
||||
SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
|
||||
if (eKey >= tTsVal->ts) {
|
||||
invalidate = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (invalidate) {
|
||||
taosLRUCacheRelease(pCache, h, true);
|
||||
} else {
|
||||
taosLRUCacheRelease(pCache, h, false);
|
||||
}
|
||||
// void taosLRUCacheErase(SLRUCache * cache, const void *key, size_t keyLen);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
*/
|
||||
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TSDBROW *row, bool dup) {
|
||||
int32_t code = 0;
|
||||
STSRow *cacheRow = NULL;
|
||||
|
@ -1692,10 +1592,51 @@ _err:
|
|||
return code;
|
||||
}
|
||||
|
||||
static void freeTableInfoFunc(void *param) {
|
||||
void **p = (void **)param;
|
||||
taosMemoryFreeClear(*p);
|
||||
}
|
||||
|
||||
static STableLoadInfo *getTableLoadInfo(SCacheRowsReader *pReader, uint64_t uid) {
|
||||
if (!pReader->pTableMap) {
|
||||
pReader->pTableMap = tSimpleHashInit(pReader->numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
|
||||
|
||||
tSimpleHashSetFreeFp(pReader->pTableMap, freeTableInfoFunc);
|
||||
}
|
||||
|
||||
STableLoadInfo *pInfo = NULL;
|
||||
STableLoadInfo **ppInfo = tSimpleHashGet(pReader->pTableMap, &uid, sizeof(uid));
|
||||
if (!ppInfo) {
|
||||
pInfo = taosMemoryCalloc(1, sizeof(STableLoadInfo));
|
||||
tSimpleHashPut(pReader->pTableMap, &uid, sizeof(uint64_t), &pInfo, POINTER_BYTES);
|
||||
|
||||
return pInfo;
|
||||
}
|
||||
|
||||
return *ppInfo;
|
||||
}
|
||||
|
||||
static uint64_t *getUidList(SCacheRowsReader *pReader) {
|
||||
if (!pReader->uidList) {
|
||||
int32_t numOfTables = pReader->numOfTables;
|
||||
|
||||
pReader->uidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t));
|
||||
|
||||
for (int32_t i = 0; i < numOfTables; ++i) {
|
||||
uint64_t uid = pReader->pTableList[i].uid;
|
||||
pReader->uidList[i] = uid;
|
||||
}
|
||||
|
||||
taosSort(pReader->uidList, numOfTables, sizeof(uint64_t), uidComparFunc);
|
||||
}
|
||||
|
||||
return pReader->uidList;
|
||||
}
|
||||
|
||||
static int32_t loadTombFromBlk(const TTombBlkArray *pTombBlkArray, SCacheRowsReader *pReader, void *pFileReader,
|
||||
bool isFile) {
|
||||
int32_t code = 0;
|
||||
uint64_t *uidList = pReader->uidList;
|
||||
uint64_t *uidList = getUidList(pReader);
|
||||
int32_t numOfTables = pReader->numOfTables;
|
||||
int64_t suid = pReader->info.suid;
|
||||
|
||||
|
@ -1718,7 +1659,7 @@ static int32_t loadTombFromBlk(const TTombBlkArray *pTombBlkArray, SCacheRowsRea
|
|||
}
|
||||
|
||||
uint64_t uid = uidList[j];
|
||||
STableLoadInfo *pInfo = *(STableLoadInfo **)tSimpleHashGet(pReader->pTableMap, &uid, sizeof(uid));
|
||||
STableLoadInfo *pInfo = getTableLoadInfo(pReader, uid);
|
||||
if (pInfo->pTombData == NULL) {
|
||||
pInfo->pTombData = taosArrayInit(4, sizeof(SDelData));
|
||||
}
|
||||
|
@ -1760,13 +1701,16 @@ static int32_t loadTombFromBlk(const TTombBlkArray *pTombBlkArray, SCacheRowsRea
|
|||
}
|
||||
|
||||
if (newTable) {
|
||||
pInfo = *(STableLoadInfo **)tSimpleHashGet(pReader->pTableMap, &uid, sizeof(uid));
|
||||
pInfo = getTableLoadInfo(pReader, uid);
|
||||
if (pInfo->pTombData == NULL) {
|
||||
pInfo->pTombData = taosArrayInit(4, sizeof(SDelData));
|
||||
}
|
||||
}
|
||||
|
||||
if (record.version <= pReader->info.verRange.maxVer) {
|
||||
tsdbError("tomb xx load/cache: vgId:%d fid:%d commit %" PRId64 "~%" PRId64 "~%" PRId64 " tomb records",
|
||||
TD_VID(pReader->pTsdb->pVnode), pReader->pCurFileSet->fid, record.skey, record.ekey, uid);
|
||||
|
||||
SDelData delData = {.version = record.version, .sKey = record.skey, .eKey = record.ekey};
|
||||
taosArrayPush(pInfo->pTombData, &delData);
|
||||
}
|
||||
|
@ -1892,12 +1836,10 @@ struct CacheNextRowIter;
|
|||
|
||||
typedef struct SFSNextRowIter {
|
||||
SFSNEXTROWSTATES state; // [input]
|
||||
STsdb *pTsdb; // [input]
|
||||
SBlockIdx *pBlockIdxExp; // [input]
|
||||
STSchema *pTSchema; // [input]
|
||||
tb_uid_t suid;
|
||||
tb_uid_t uid;
|
||||
int32_t nFileSet;
|
||||
int32_t iFileSet;
|
||||
STFileSet *pFileSet;
|
||||
TFileSetArray *aDFileSet;
|
||||
|
@ -1928,10 +1870,10 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
|
|||
int nCols) {
|
||||
SFSNextRowIter *state = (SFSNextRowIter *)iter;
|
||||
int32_t code = 0;
|
||||
STsdb *pTsdb = state->pr->pTsdb;
|
||||
|
||||
if (SFSNEXTROW_FS == state->state) {
|
||||
state->nFileSet = TARRAY2_SIZE(state->aDFileSet);
|
||||
state->iFileSet = state->nFileSet;
|
||||
state->iFileSet = TARRAY2_SIZE(state->aDFileSet);
|
||||
|
||||
state->state = SFSNEXTROW_FILESET;
|
||||
}
|
||||
|
@ -1950,7 +1892,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
|
|||
STFileObj **pFileObj = state->pFileSet->farr;
|
||||
if (pFileObj[0] != NULL || pFileObj[3] != NULL) {
|
||||
if (state->pFileSet != state->pr->pCurFileSet) {
|
||||
SDataFileReaderConfig conf = {.tsdb = state->pTsdb, .szPage = state->pTsdb->pVnode->config.tsdbPageSize};
|
||||
SDataFileReaderConfig conf = {.tsdb = pTsdb, .szPage = pTsdb->pVnode->config.tsdbPageSize};
|
||||
const char *filesName[4] = {0};
|
||||
if (pFileObj[0] != NULL) {
|
||||
conf.files[0].file = *pFileObj[0]->f;
|
||||
|
@ -1977,9 +1919,14 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
|
|||
goto _err;
|
||||
}
|
||||
|
||||
state->pr->pCurFileSet = state->pFileSet;
|
||||
|
||||
loadDataTomb(state->pr, state->pr->pFileReader);
|
||||
|
||||
state->pr->pCurFileSet = state->pFileSet;
|
||||
int32_t code = tsdbDataFileReadBrinBlk(state->pr->pFileReader, &state->pr->pBlkArray);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
if (!state->pIndexList) {
|
||||
|
@ -1987,12 +1934,8 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
|
|||
} else {
|
||||
taosArrayClear(state->pIndexList);
|
||||
}
|
||||
const TBrinBlkArray *pBlkArray = NULL;
|
||||
|
||||
int32_t code = tsdbDataFileReadBrinBlk(state->pr->pFileReader, &pBlkArray);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _err;
|
||||
}
|
||||
const TBrinBlkArray *pBlkArray = state->pr->pBlkArray;
|
||||
|
||||
for (int i = TARRAY2_SIZE(pBlkArray) - 1; i >= 0; --i) {
|
||||
SBrinBlk *pBrinBlk = &pBlkArray->data[i];
|
||||
|
@ -2008,17 +1951,20 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
|
|||
|
||||
int indexSize = TARRAY_SIZE(state->pIndexList);
|
||||
if (indexSize <= 0) {
|
||||
clearLastFileSet(state);
|
||||
state->state = SFSNEXTROW_FILESET;
|
||||
goto _next_fileset;
|
||||
goto _check_stt_data;
|
||||
}
|
||||
|
||||
state->state = SFSNEXTROW_INDEXLIST;
|
||||
state->iBrinIndex = indexSize;
|
||||
}
|
||||
|
||||
code = lastIterOpen(&state->lastIter, state->pFileSet, state->pTsdb, state->pTSchema, state->suid, state->uid,
|
||||
state->pr, state->lastTs, aCols, nCols);
|
||||
_check_stt_data:
|
||||
if (state->pFileSet != state->pr->pCurFileSet) {
|
||||
state->pr->pCurFileSet = state->pFileSet;
|
||||
}
|
||||
|
||||
code = lastIterOpen(&state->lastIter, state->pFileSet, pTsdb, state->pTSchema, state->suid, state->uid, state->pr,
|
||||
state->lastTs, aCols, nCols);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _err;
|
||||
}
|
||||
|
@ -2438,7 +2384,6 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
|
|||
|
||||
pIter->fsState.pRowIter = pIter;
|
||||
pIter->fsState.state = SFSNEXTROW_FS;
|
||||
pIter->fsState.pTsdb = pTsdb;
|
||||
pIter->fsState.aDFileSet = pReadSnap->pfSetArray;
|
||||
pIter->fsState.pBlockIdxExp = &pIter->idx;
|
||||
pIter->fsState.pTSchema = pTSchema;
|
||||
|
@ -2555,14 +2500,17 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow, bool *pI
|
|||
pIter->pSkyline = taosArrayInit(32, sizeof(TSDBKEY));
|
||||
|
||||
uint64_t uid = pIter->idx.uid;
|
||||
STableLoadInfo *pInfo = *(STableLoadInfo **)tSimpleHashGet(pIter->pr->pTableMap, &uid, sizeof(uid));
|
||||
SArray *pTombData = pInfo->pTombData;
|
||||
if (pTombData) {
|
||||
taosArrayAddAll(pTombData, pIter->pMemDelData);
|
||||
|
||||
code = tsdbBuildDeleteSkyline(pTombData, 0, (int32_t)(TARRAY_SIZE(pTombData) - 1), pIter->pSkyline);
|
||||
STableLoadInfo *pInfo = getTableLoadInfo(pIter->pr, uid);
|
||||
if (pInfo->pTombData == NULL) {
|
||||
pInfo->pTombData = taosArrayInit(4, sizeof(SDelData));
|
||||
}
|
||||
|
||||
taosArrayAddAll(pInfo->pTombData, pIter->pMemDelData);
|
||||
|
||||
size_t delSize = TARRAY_SIZE(pInfo->pTombData);
|
||||
if (delSize > 0) {
|
||||
code = tsdbBuildDeleteSkyline(pInfo->pTombData, 0, (int32_t)(delSize - 1), pIter->pSkyline);
|
||||
}
|
||||
pIter->iSkyline = taosArrayGetSize(pIter->pSkyline) - 1;
|
||||
}
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "tcommon.h"
|
||||
#include "tsdb.h"
|
||||
#include "tsdbDataFileRW.h"
|
||||
#include "tsdbReadUtil.h"
|
||||
|
||||
#define HASTYPE(_type, _t) (((_type) & (_t)) == (_t))
|
||||
|
||||
|
@ -133,21 +134,6 @@ int32_t tsdbReuseCacherowsReader(void* reader, void* pTableIdList, int32_t numOf
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t uidComparFunc(const void* p1, const void* p2) {
|
||||
uint64_t pu1 = *(uint64_t*)p1;
|
||||
uint64_t pu2 = *(uint64_t*)p2;
|
||||
if (pu1 == pu2) {
|
||||
return 0;
|
||||
} else {
|
||||
return (pu1 < pu2) ? -1 : 1;
|
||||
}
|
||||
}
|
||||
|
||||
static void freeTableInfoFunc(void* param) {
|
||||
void** p = (void**)param;
|
||||
taosMemoryFreeClear(*p);
|
||||
}
|
||||
|
||||
int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols,
|
||||
SArray* pCidList, int32_t* pSlotIds, uint64_t suid, void** pReader, const char* idstr) {
|
||||
*pReader = NULL;
|
||||
|
@ -173,27 +159,6 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
|
|||
p->pTableList = pTableIdList;
|
||||
p->numOfTables = numOfTables;
|
||||
|
||||
p->pTableMap = tSimpleHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
|
||||
if (p->pTableMap == NULL) {
|
||||
tsdbCacherowsReaderClose(p);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
p->uidList = taosMemoryMalloc(numOfTables * sizeof(uint64_t));
|
||||
if (p->uidList == NULL) {
|
||||
tsdbCacherowsReaderClose(p);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
for (int32_t i = 0; i < numOfTables; ++i) {
|
||||
uint64_t uid = p->pTableList[i].uid;
|
||||
p->uidList[i] = uid;
|
||||
STableLoadInfo* pInfo = taosMemoryCalloc(1, sizeof(STableLoadInfo));
|
||||
tSimpleHashPut(p->pTableMap, &uid, sizeof(uint64_t), &pInfo, POINTER_BYTES);
|
||||
}
|
||||
|
||||
tSimpleHashSetFreeFp(p->pTableMap, freeTableInfoFunc);
|
||||
|
||||
taosSort(p->uidList, numOfTables, sizeof(uint64_t), uidComparFunc);
|
||||
|
||||
int32_t code = setTableSchema(p, suid, idstr);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tsdbCacherowsReaderClose(p);
|
||||
|
@ -216,14 +181,6 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
|
|||
}
|
||||
}
|
||||
|
||||
SVnodeCfg* pCfg = &((SVnode*)pVnode)->config;
|
||||
int32_t numOfStt = pCfg->sttTrigger;
|
||||
p->pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
|
||||
if (p->pLDataIterArray == NULL) {
|
||||
tsdbCacherowsReaderClose(p);
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
p->idstr = taosStrdup(idstr);
|
||||
taosThreadMutexInit(&p->readerMutex, NULL);
|
||||
|
||||
|
@ -250,9 +207,11 @@ void* tsdbCacherowsReaderClose(void* pReader) {
|
|||
|
||||
taosMemoryFree(p->pCurrSchema);
|
||||
|
||||
int64_t loadBlocks = 0;
|
||||
double elapse = 0;
|
||||
destroySttBlockReader(p->pLDataIterArray, &loadBlocks, &elapse);
|
||||
if (p->pLDataIterArray) {
|
||||
int64_t loadBlocks = 0;
|
||||
double elapse = 0;
|
||||
destroySttBlockReader(p->pLDataIterArray, &loadBlocks, &elapse);
|
||||
}
|
||||
|
||||
if (p->pFileReader) {
|
||||
tsdbDataFileReaderClose(&p->pFileReader);
|
||||
|
@ -318,7 +277,6 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
|
|||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SArray* pRow = taosArrayInit(TARRAY_SIZE(pr->pCidList), sizeof(SLastCol));
|
||||
bool hasRes = false;
|
||||
SArray* pLastCols = NULL;
|
||||
|
||||
void** pRes = taosMemoryCalloc(pr->numOfCols, POINTER_BYTES);
|
||||
if (pRes == NULL) {
|
||||
|
@ -327,57 +285,47 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
|
|||
}
|
||||
|
||||
for (int32_t j = 0; j < pr->numOfCols; ++j) {
|
||||
pRes[j] =
|
||||
taosMemoryCalloc(1, sizeof(SFirstLastRes) + pr->pSchema->columns[/*-1 == slotIds[j] ? 0 : */ slotIds[j]].bytes +
|
||||
VARSTR_HEADER_SIZE);
|
||||
pRes[j] = taosMemoryCalloc(1, sizeof(SFirstLastRes) + pr->pSchema->columns[slotIds[j]].bytes + VARSTR_HEADER_SIZE);
|
||||
SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[j]);
|
||||
p->ts = INT64_MIN;
|
||||
}
|
||||
|
||||
pLastCols = taosArrayInit(pr->numOfCols, sizeof(SLastCol));
|
||||
if (pLastCols == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _end;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pr->numOfCols; ++i) {
|
||||
int32_t slotId = slotIds[i];
|
||||
struct STColumn* pCol = &pr->pSchema->columns[slotId];
|
||||
SLastCol p = {.ts = INT64_MIN, .colVal.type = pCol->type, .colVal.flag = CV_FLAG_NULL};
|
||||
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
p.colVal.value.pData = taosMemoryCalloc(pCol->bytes, sizeof(char));
|
||||
}
|
||||
taosArrayPush(pLastCols, &p);
|
||||
}
|
||||
|
||||
taosThreadMutexLock(&pr->readerMutex);
|
||||
code = tsdbTakeReadSnap2((STsdbReader*)pr, tsdbCacheQueryReseek, &pr->pReadSnap);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
int8_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3;
|
||||
int8_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3;
|
||||
STableKeyInfo* pTableList = pr->pTableList;
|
||||
|
||||
// retrieve the only one last row of all tables in the uid list.
|
||||
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) {
|
||||
SArray* pLastCols = taosArrayInit(pr->numOfCols, sizeof(SLastCol));
|
||||
if (pLastCols == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _end;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pr->numOfCols; ++i) {
|
||||
int32_t slotId = slotIds[i];
|
||||
struct STColumn* pCol = &pr->pSchema->columns[slotId];
|
||||
SLastCol p = {.ts = INT64_MIN, .colVal.type = pCol->type, .colVal.flag = CV_FLAG_NULL};
|
||||
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
p.colVal.value.pData = taosMemoryCalloc(pCol->bytes, sizeof(char));
|
||||
}
|
||||
taosArrayPush(pLastCols, &p);
|
||||
}
|
||||
|
||||
int64_t st = taosGetTimestampUs();
|
||||
int64_t totalLastTs = INT64_MAX;
|
||||
|
||||
for (int32_t i = 0; i < pr->numOfTables; ++i) {
|
||||
STableKeyInfo* pKeyInfo = &pr->pTableList[i];
|
||||
tb_uid_t uid = pTableList[i].uid;
|
||||
|
||||
tsdbCacheGetBatch(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
|
||||
// tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
|
||||
if (TARRAY_SIZE(pRow) <= 0) {
|
||||
tsdbCacheGetBatch(pr->pTsdb, uid, pRow, pr, ltype);
|
||||
if (TARRAY_SIZE(pRow) <= 0 || COL_VAL_IS_NONE(&((SLastCol*)TARRAY_DATA(pRow))[0].colVal)) {
|
||||
taosArrayClearEx(pRow, freeItem);
|
||||
// taosArrayClear(pRow);
|
||||
continue;
|
||||
}
|
||||
SLastCol* pColVal = taosArrayGet(pRow, 0);
|
||||
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
|
||||
taosArrayClearEx(pRow, freeItem);
|
||||
// taosArrayClear(pRow);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -400,9 +348,9 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
|
|||
p->ts = pColVal->ts;
|
||||
if (k == 0) {
|
||||
if (TARRAY_SIZE(pTableUidList) == 0) {
|
||||
taosArrayPush(pTableUidList, &pKeyInfo->uid);
|
||||
taosArrayPush(pTableUidList, &uid);
|
||||
} else {
|
||||
taosArraySet(pTableUidList, 0, &pKeyInfo->uid);
|
||||
taosArraySet(pTableUidList, 0, &uid);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -437,32 +385,25 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
|
|||
}
|
||||
|
||||
taosArrayClearEx(pRow, freeItem);
|
||||
// taosArrayClear(pRow);
|
||||
}
|
||||
|
||||
if (hasRes) {
|
||||
saveOneRow(pLastCols, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
|
||||
}
|
||||
|
||||
taosArrayDestroyEx(pLastCols, freeItem);
|
||||
} else if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_ALL)) {
|
||||
for (int32_t i = pr->tableIndex; i < pr->numOfTables; ++i) {
|
||||
tb_uid_t uid = pr->pTableList[i].uid;
|
||||
tb_uid_t uid = pTableList[i].uid;
|
||||
|
||||
tsdbCacheGetBatch(pr->pTsdb, uid, pRow, pr, ltype);
|
||||
if (TARRAY_SIZE(pRow) <= 0) {
|
||||
if (TARRAY_SIZE(pRow) <= 0 || COL_VAL_IS_NONE(&((SLastCol*)TARRAY_DATA(pRow))[0].colVal)) {
|
||||
taosArrayClearEx(pRow, freeItem);
|
||||
// taosArrayClear(pRow);
|
||||
continue;
|
||||
}
|
||||
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
|
||||
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
|
||||
taosArrayClearEx(pRow, freeItem);
|
||||
// taosArrayClear(pRow);
|
||||
continue;
|
||||
}
|
||||
|
||||
saveOneRow(pRow, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
|
||||
taosArrayClearEx(pRow, freeItem);
|
||||
// taosArrayClear(pRow);
|
||||
|
||||
taosArrayPush(pTableUidList, &uid);
|
||||
|
||||
|
@ -478,11 +419,6 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
|
|||
_end:
|
||||
tsdbUntakeReadSnap2((STsdbReader*)pr, pr->pReadSnap, true);
|
||||
|
||||
int64_t loadBlocks = 0;
|
||||
double elapse = 0;
|
||||
pr->pLDataIterArray = destroySttBlockReader(pr->pLDataIterArray, &loadBlocks, &elapse);
|
||||
pr->pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
|
||||
|
||||
taosThreadMutexUnlock(&pr->readerMutex);
|
||||
|
||||
if (pRes != NULL) {
|
||||
|
@ -492,9 +428,7 @@ _end:
|
|||
}
|
||||
|
||||
taosMemoryFree(pRes);
|
||||
// taosArrayDestroyEx(pRow, freeItem);
|
||||
taosArrayDestroy(pRow);
|
||||
taosArrayDestroyEx(pLastCols, freeItem);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -1229,11 +1229,16 @@ static int32_t tsdbDataFileDoWriteTombRecord(SDataFileWriter *writer, const STom
|
|||
|
||||
int32_t c = tTombRecordCompare(record, record1);
|
||||
if (c < 0) {
|
||||
break;
|
||||
goto _write;
|
||||
} else if (c > 0) {
|
||||
code = tTombBlockPut(writer->tombBlock, record1);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
||||
tsdbTrace("vgId:%d write tomb record to tomb file:%s, cid:%" PRId64 ", suid:%" PRId64 ", uid:%" PRId64
|
||||
", version:%" PRId64,
|
||||
TD_VID(writer->config->tsdb->pVnode), writer->fd[TSDB_FTYPE_TOMB]->path, writer->config->cid,
|
||||
record1->suid, record1->uid, record1->version);
|
||||
|
||||
if (TOMB_BLOCK_SIZE(writer->tombBlock) >= writer->config->maxRow) {
|
||||
code = tsdbDataFileDoWriteTombBlock(writer);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
@ -1266,6 +1271,11 @@ _write:
|
|||
code = tTombBlockPut(writer->tombBlock, record);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
||||
tsdbTrace("vgId:%d write tomb record to tomb file:%s, cid:%" PRId64 ", suid:%" PRId64 ", uid:%" PRId64
|
||||
", version:%" PRId64,
|
||||
TD_VID(writer->config->tsdb->pVnode), writer->fd[TSDB_FTYPE_TOMB]->path, writer->config->cid, record->suid,
|
||||
record->uid, record->version);
|
||||
|
||||
if (TOMB_BLOCK_SIZE(writer->tombBlock) >= writer->config->maxRow) {
|
||||
code = tsdbDataFileDoWriteTombBlock(writer);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
|
|
@ -537,6 +537,9 @@ static void tsdbDoWaitBgTask(STFileSystem *fs, STFSBgTask *task) {
|
|||
|
||||
if (task->numWait == 0) {
|
||||
taosThreadCondDestroy(task->done);
|
||||
if (task->free) {
|
||||
task->free(task->arg);
|
||||
}
|
||||
taosMemoryFree(task);
|
||||
}
|
||||
}
|
||||
|
@ -546,6 +549,9 @@ static void tsdbDoDoneBgTask(STFileSystem *fs, STFSBgTask *task) {
|
|||
taosThreadCondBroadcast(task->done);
|
||||
} else {
|
||||
taosThreadCondDestroy(task->done);
|
||||
if (task->free) {
|
||||
task->free(task->arg);
|
||||
}
|
||||
taosMemoryFree(task);
|
||||
}
|
||||
}
|
||||
|
@ -627,7 +633,7 @@ int32_t tsdbFSEditCommit(STFileSystem *fs) {
|
|||
SSttLvl *lvl = TARRAY2_FIRST(fset->lvlArr);
|
||||
if (lvl->level != 0 || TARRAY2_SIZE(lvl->fobjArr) < fs->tsdb->pVnode->config.sttTrigger) continue;
|
||||
|
||||
code = tsdbFSScheduleBgTask(fs, TSDB_BG_TASK_MERGER, tsdbMerge, fs->tsdb, NULL);
|
||||
code = tsdbFSScheduleBgTask(fs, TSDB_BG_TASK_MERGER, tsdbMerge, NULL, fs->tsdb, NULL);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
||||
break;
|
||||
|
@ -774,8 +780,8 @@ static int32_t tsdbFSRunBgTask(void *arg) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t tsdbFSScheduleBgTaskImpl(STFileSystem *fs, EFSBgTaskT type, int32_t (*run)(void *), void *arg,
|
||||
int64_t *taskid) {
|
||||
static int32_t tsdbFSScheduleBgTaskImpl(STFileSystem *fs, EFSBgTaskT type, int32_t (*run)(void *), void (*free)(void *),
|
||||
void *arg, int64_t *taskid) {
|
||||
if (fs->stop) {
|
||||
return 0; // TODO: use a better error code
|
||||
}
|
||||
|
@ -798,6 +804,7 @@ static int32_t tsdbFSScheduleBgTaskImpl(STFileSystem *fs, EFSBgTaskT type, int32
|
|||
|
||||
task->type = type;
|
||||
task->run = run;
|
||||
task->free = free;
|
||||
task->arg = arg;
|
||||
task->scheduleTime = taosGetTimestampMs();
|
||||
task->taskid = ++fs->taskid;
|
||||
|
@ -819,9 +826,10 @@ static int32_t tsdbFSScheduleBgTaskImpl(STFileSystem *fs, EFSBgTaskT type, int32
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t tsdbFSScheduleBgTask(STFileSystem *fs, EFSBgTaskT type, int32_t (*run)(void *), void *arg, int64_t *taskid) {
|
||||
int32_t tsdbFSScheduleBgTask(STFileSystem *fs, EFSBgTaskT type, int32_t (*run)(void *), void (*free)(void *), void *arg,
|
||||
int64_t *taskid) {
|
||||
taosThreadMutexLock(fs->mutex);
|
||||
int32_t code = tsdbFSScheduleBgTaskImpl(fs, type, run, arg, taskid);
|
||||
int32_t code = tsdbFSScheduleBgTaskImpl(fs, type, run, free, arg, taskid);
|
||||
taosThreadMutexUnlock(fs->mutex);
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -59,7 +59,8 @@ int32_t tsdbFSEditBegin(STFileSystem *fs, const TFileOpArray *opArray, EFEditT e
|
|||
int32_t tsdbFSEditCommit(STFileSystem *fs);
|
||||
int32_t tsdbFSEditAbort(STFileSystem *fs);
|
||||
// background task
|
||||
int32_t tsdbFSScheduleBgTask(STFileSystem *fs, EFSBgTaskT type, int32_t (*run)(void *), void *arg, int64_t *taskid);
|
||||
int32_t tsdbFSScheduleBgTask(STFileSystem *fs, EFSBgTaskT type, int32_t (*run)(void *), void (*free)(void *), void *arg,
|
||||
int64_t *taskid);
|
||||
int32_t tsdbFSWaitBgTask(STFileSystem *fs, int64_t taskid);
|
||||
int32_t tsdbFSWaitAllBgTask(STFileSystem *fs);
|
||||
int32_t tsdbFSDisableBgTask(STFileSystem *fs);
|
||||
|
@ -70,6 +71,7 @@ int32_t tsdbFSGetFSet(STFileSystem *fs, int32_t fid, STFileSet **fset);
|
|||
struct STFSBgTask {
|
||||
EFSBgTaskT type;
|
||||
int32_t (*run)(void *arg);
|
||||
void (*free)(void *arg);
|
||||
void *arg;
|
||||
|
||||
TdThreadCond done[1];
|
||||
|
|
|
@ -413,6 +413,13 @@ int32_t tLDataIterOpen2(struct SLDataIter *pIter, SSttFileReader *pSttFileReader
|
|||
pIter->pReader = pSttFileReader;
|
||||
pIter->pBlockLoadInfo = pBlockLoadInfo;
|
||||
|
||||
if (pIter->pReader == NULL) {
|
||||
tsdbError("stt file reader is null, %s", idStr);
|
||||
pIter->pSttBlk = NULL;
|
||||
pIter->iSttBlk = -1;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (!pBlockLoadInfo->sttBlockLoaded) {
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
|
@ -759,7 +766,6 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) {
|
|||
|
||||
pMTree->ignoreEarlierTs = false;
|
||||
|
||||
// todo handle other level of stt files, here only deal with the first level stt
|
||||
int32_t size = ((STFileSet *)pConf->pCurrentFileset)->lvlArr->size;
|
||||
if (size == 0) {
|
||||
goto _end;
|
||||
|
@ -784,6 +790,12 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) {
|
|||
SLDataIter *pIter = taosMemoryCalloc(1, sizeof(SLDataIter));
|
||||
taosArrayPush(pList, &pIter);
|
||||
}
|
||||
} else if (numOfIter > TARRAY2_SIZE(pSttLevel->fobjArr)){
|
||||
int32_t inc = numOfIter - TARRAY2_SIZE(pSttLevel->fobjArr);
|
||||
for (int i = 0; i < inc; ++i) {
|
||||
SLDataIter *pIter = taosArrayPop(pList);
|
||||
destroyLDataIter(pIter);
|
||||
}
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < TARRAY2_SIZE(pSttLevel->fobjArr); ++i) { // open all last file
|
||||
|
@ -799,7 +811,8 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) {
|
|||
|
||||
code = tsdbSttFileReaderOpen(pSttLevel->fobjArr->data[i]->fname, &conf, &pSttFileReader);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
tsdbError("open stt file reader error. file name %s, code %s, %s", pSttLevel->fobjArr->data[i]->fname,
|
||||
tstrerror(code), pMTree->idStr);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -814,7 +827,7 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) {
|
|||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
|
||||
bool hasVal = tLDataIterNextRow(pIter, pMTree->idStr);
|
||||
if (hasVal) {
|
||||
tMergeTreeAddIter(pMTree, pIter);
|
||||
|
|
|
@ -1729,45 +1729,41 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
|
|||
|
||||
// row in last file block
|
||||
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
|
||||
int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
|
||||
int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader);
|
||||
|
||||
if (ASCENDING_TRAVERSE(pReader->info.order)) {
|
||||
if (key < tsLast) {
|
||||
if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist
|
||||
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
|
||||
} else if (key > tsLast) {
|
||||
return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false);
|
||||
}
|
||||
} else {
|
||||
if (key > tsLast) {
|
||||
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
|
||||
} else if (key < tsLast) {
|
||||
} else if (key == ts) {
|
||||
SRow* pTSRow = NULL;
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
|
||||
|
||||
TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
|
||||
tsdbRowMergerAdd(pMerger, pRow1, NULL);
|
||||
|
||||
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, pMerger, &pReader->info.verRange, pReader->idStr);
|
||||
|
||||
code = tsdbRowMergerGetRow(pMerger, &pTSRow);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
|
||||
|
||||
taosMemoryFree(pTSRow);
|
||||
tsdbRowMergerClear(pMerger);
|
||||
return code;
|
||||
} else { // key > ts
|
||||
return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false);
|
||||
}
|
||||
} else { // desc order
|
||||
return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, pBlockData, true);
|
||||
}
|
||||
// the following for key == tsLast
|
||||
SRow* pTSRow = NULL;
|
||||
int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader);
|
||||
|
||||
TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
|
||||
tsdbRowMergerAdd(pMerger, pRow1, NULL);
|
||||
|
||||
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, pReader->idStr);
|
||||
|
||||
code = tsdbRowMergerGetRow(pMerger, &pTSRow);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo);
|
||||
|
||||
taosMemoryFree(pTSRow);
|
||||
tsdbRowMergerClear(pMerger);
|
||||
return code;
|
||||
|
||||
} else { // only last block exists
|
||||
return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false);
|
||||
}
|
||||
|
@ -2194,8 +2190,7 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
|
|||
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
|
||||
|
||||
TSDBROW *pRow = NULL, *piRow = NULL;
|
||||
int64_t key = (pBlockData->nRow > 0 && (!pDumpInfo->allDumped)) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] :
|
||||
(ASCENDING_TRAVERSE(pReader->info.order) ? INT64_MAX : INT64_MIN);
|
||||
int64_t key = (pBlockData->nRow > 0 && (!pDumpInfo->allDumped)) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] : INT64_MIN;
|
||||
if (pBlockScanInfo->iter.hasVal) {
|
||||
pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
|
||||
}
|
||||
|
@ -2569,18 +2564,9 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
|
|||
|
||||
// load the last data block of current table
|
||||
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter;
|
||||
if (pScanInfo == NULL) {
|
||||
tsdbError("table Iter is null, invalid pScanInfo, try next table %s", pReader->idStr);
|
||||
bool hasNexTable = moveToNextTable(pUidList, pStatus);
|
||||
if (!hasNexTable) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) {
|
||||
// reset the index in last block when handling a new file
|
||||
// doCleanupTableScanInfo(pScanInfo);
|
||||
bool hasNexTable = moveToNextTable(pUidList, pStatus);
|
||||
if (!hasNexTable) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -2589,6 +2575,9 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
|
|||
continue;
|
||||
}
|
||||
|
||||
// reset the index in last block when handling a new file
|
||||
// doCleanupTableScanInfo(pScanInfo);
|
||||
|
||||
bool hasDataInLastFile = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
|
||||
if (!hasDataInLastFile) {
|
||||
bool hasNexTable = moveToNextTable(pUidList, pStatus);
|
||||
|
@ -2678,32 +2667,16 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
|
|||
(ASCENDING_TRAVERSE(pReader->info.order)) ? pBlockInfo->record.firstKey : pBlockInfo->record.lastKey;
|
||||
code = buildDataBlockFromBuf(pReader, pScanInfo, endKey);
|
||||
} else {
|
||||
bool bHasDataInLastBlock = hasDataInLastBlock(pLastBlockReader);
|
||||
int64_t tsLast = bHasDataInLastBlock ? getCurrentKeyInLastBlock(pLastBlockReader) : INT64_MIN;
|
||||
if (!bHasDataInLastBlock || ((ASCENDING_TRAVERSE(pReader->info.order) && pBlockInfo->record.lastKey < tsLast) ||
|
||||
(!ASCENDING_TRAVERSE(pReader->info.order) && pBlockInfo->record.firstKey > tsLast))) {
|
||||
// whole block is required, return it directly
|
||||
SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info;
|
||||
pInfo->rows = pBlockInfo->record.numRow;
|
||||
pInfo->id.uid = pScanInfo->uid;
|
||||
pInfo->dataLoad = 0;
|
||||
pInfo->window = (STimeWindow){.skey = pBlockInfo->record.firstKey, .ekey = pBlockInfo->record.lastKey};
|
||||
setComposedBlockFlag(pReader, false);
|
||||
setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order);
|
||||
if (hasDataInLastBlock(pLastBlockReader) && !ASCENDING_TRAVERSE(pReader->info.order)) {
|
||||
// only return the rows in last block
|
||||
int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
|
||||
ASSERT(tsLast >= pBlockInfo->record.lastKey);
|
||||
|
||||
// update the last key for the corresponding table
|
||||
pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->info.order) ? pInfo->window.ekey : pInfo->window.skey;
|
||||
tsdbDebug("%p uid:%" PRIu64
|
||||
" clean file block retrieved from file, global index:%d, "
|
||||
"table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s",
|
||||
pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->record.numRow,
|
||||
pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr);
|
||||
} else {
|
||||
SBlockData* pBData = &pReader->status.fileBlockData;
|
||||
tBlockDataReset(pBData);
|
||||
|
||||
SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
|
||||
tsdbDebug("load data in last block firstly %s", pReader->idStr);
|
||||
tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr);
|
||||
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
|
@ -2734,8 +2707,23 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
|
|||
pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
|
||||
pResBlock->info.rows, el, pReader->idStr);
|
||||
}
|
||||
}
|
||||
} else { // whole block is required, return it directly
|
||||
SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info;
|
||||
pInfo->rows = pBlockInfo->record.numRow;
|
||||
pInfo->id.uid = pScanInfo->uid;
|
||||
pInfo->dataLoad = 0;
|
||||
pInfo->window = (STimeWindow){.skey = pBlockInfo->record.firstKey, .ekey = pBlockInfo->record.lastKey};
|
||||
setComposedBlockFlag(pReader, false);
|
||||
setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order);
|
||||
|
||||
// update the last key for the corresponding table
|
||||
pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->info.order) ? pInfo->window.ekey : pInfo->window.skey;
|
||||
tsdbDebug("%p uid:%" PRIu64
|
||||
" clean file block retrieved from file, global index:%d, "
|
||||
"table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s",
|
||||
pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->record.numRow,
|
||||
pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr);
|
||||
}
|
||||
}
|
||||
|
||||
return (pReader->code != TSDB_CODE_SUCCESS) ? pReader->code : code;
|
||||
|
@ -4109,6 +4097,11 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) {
|
|||
|
||||
tsdbDataFileReaderClose(&pReader->pFileReader);
|
||||
|
||||
int64_t loadBlocks = 0;
|
||||
double elapse = 0;
|
||||
pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &loadBlocks, &elapse);
|
||||
pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
|
||||
|
||||
// resetDataBlockScanInfo excluding lastKey
|
||||
STableBlockScanInfo** p = NULL;
|
||||
int32_t iter = 0;
|
||||
|
@ -4179,7 +4172,7 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) {
|
|||
}
|
||||
}
|
||||
|
||||
tsdbUntakeReadSnap(pReader, pReader->pReadSnap, false);
|
||||
tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false);
|
||||
pReader->pReadSnap = NULL;
|
||||
pReader->flag = READER_STATUS_SUSPEND;
|
||||
|
||||
|
|
|
@@ -22,7 +22,7 @@
#include "tsdbUtil2.h"
#include "tsimplehash.h"

static int32_t uidComparFunc(const void* p1, const void* p2) {
int32_t uidComparFunc(const void* p1, const void* p2) {
  uint64_t pu1 = *(uint64_t*)p1;
  uint64_t pu2 = *(uint64_t*)p2;
  if (pu1 == pu2) {

@ -36,6 +36,16 @@ typedef enum {
|
|||
EXTERNAL_ROWS_NEXT = 0x3,
|
||||
} EContentData;
|
||||
|
||||
typedef struct STsdbReaderInfo {
|
||||
uint64_t suid;
|
||||
STSchema* pSchema;
|
||||
EReadMode readMode;
|
||||
uint64_t rowsNum;
|
||||
STimeWindow window;
|
||||
SVersionRange verRange;
|
||||
int16_t order;
|
||||
} STsdbReaderInfo;
|
||||
|
||||
typedef struct SBlockInfoBuf {
|
||||
int32_t currentIndex;
|
||||
SArray* pData;
|
||||
|
@ -215,6 +225,8 @@ typedef struct SBrinRecordIter {
|
|||
SBrinRecord record;
|
||||
} SBrinRecordIter;
|
||||
|
||||
int32_t uidComparFunc(const void* p1, const void* p2);
|
||||
|
||||
STableBlockScanInfo* getTableBlockScanInfo(SSHashObj* pTableMap, uint64_t uid, const char* id);
|
||||
|
||||
SSHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList,
|
||||
|
@ -241,6 +253,41 @@ void loadMemTombData(SArray** ppMemDelData, STbData* pMemTbData, STbData* piM
|
|||
int32_t loadDataFileTombDataForAll(STsdbReader* pReader);
|
||||
int32_t loadSttTombDataForAll(STsdbReader* pReader, SSttFileReader* pSttFileReader, SSttBlockLoadInfo* pLoadInfo);
|
||||
|
||||
typedef struct {
|
||||
SArray* pTombData;
|
||||
} STableLoadInfo;
|
||||
|
||||
struct SDataFileReader;
|
||||
|
||||
typedef struct SCacheRowsReader {
|
||||
STsdb* pTsdb;
|
||||
STsdbReaderInfo info;
|
||||
TdThreadMutex readerMutex;
|
||||
SVnode* pVnode;
|
||||
STSchema* pSchema;
|
||||
STSchema* pCurrSchema;
|
||||
uint64_t uid;
|
||||
char** transferBuf; // todo remove it soon
|
||||
int32_t numOfCols;
|
||||
SArray* pCidList;
|
||||
int32_t* pSlotIds;
|
||||
int32_t type;
|
||||
int32_t tableIndex; // currently returned result tables
|
||||
STableKeyInfo* pTableList; // table id list
|
||||
int32_t numOfTables;
|
||||
uint64_t* uidList;
|
||||
SSHashObj* pTableMap;
|
||||
SArray* pLDataIterArray;
|
||||
struct SDataFileReader* pFileReader;
|
||||
STFileSet* pCurFileSet;
|
||||
const TBrinBlkArray* pBlkArray;
|
||||
STsdbReadSnap* pReadSnap;
|
||||
char* idstr;
|
||||
int64_t lastTs;
|
||||
} SCacheRowsReader;
|
||||
|
||||
int32_t tsdbCacheGetBatch(STsdb* pTsdb, tb_uid_t uid, SArray* pLastArray, SCacheRowsReader* pr, int8_t ltype);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -100,7 +100,7 @@ static int32_t tsdbDoMigrateFileObj(SRTNer *rtner, const STFileObj *fobj, const
|
|||
.type = fobj->f->type,
|
||||
.did = did[0],
|
||||
.fid = fobj->f->fid,
|
||||
.cid = rtner->cid,
|
||||
.cid = fobj->f->cid,
|
||||
.size = fobj->f->size,
|
||||
.stt[0] =
|
||||
{
|
||||
|
@@ -125,6 +125,7 @@ _exit:

typedef struct {
  STsdb  *tsdb;
  int32_t sync;
  int64_t now;
} SRtnArg;

@@ -251,28 +252,33 @@
  if (code) {
    TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code);
  }
  taosMemoryFree(arg);
  return code;
}

int32_t tsdbAsyncRetention(STsdb *tsdb, int64_t now, int64_t *taskid) {
static void tsdbFreeRtnArg(void *arg) {
  SRtnArg *rArg = (SRtnArg *)arg;
  if (rArg->sync) {
    tsem_post(&rArg->tsdb->pVnode->canCommit);
  }
  taosMemoryFree(arg);
}

int32_t tsdbRetention(STsdb *tsdb, int64_t now, int32_t sync) {
  SRtnArg *arg = taosMemoryMalloc(sizeof(*arg));
  if (arg == NULL) return TSDB_CODE_OUT_OF_MEMORY;

  arg->tsdb = tsdb;
  arg->sync = sync;
  arg->now = now;

  int32_t code = tsdbFSScheduleBgTask(tsdb->pFS, TSDB_BG_TASK_RETENTION, tsdbDoRetention2, arg, taskid);
  if (code) taosMemoryFree(arg);
  if (sync) {
    tsem_wait(&tsdb->pVnode->canCommit);
  }

  return code;
}

int32_t tsdbSyncRetention(STsdb *tsdb, int64_t now) {
  int64_t taskid;

  int32_t code = tsdbAsyncRetention(tsdb, now, &taskid);
  if (code) return code;

  return tsdbFSWaitBgTask(tsdb->pFS, taskid);
  int32_t code =
      tsdbFSScheduleBgTask(tsdb->pFS, TSDB_BG_TASK_RETENTION, tsdbDoRetention2, tsdbFreeRtnArg, arg, &taskid);
  if (code) {
    tsdbFreeRtnArg(arg);
  }
  return code;
}

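A minimal, self-contained sketch (plain C, standard library only) of the run/free callback pairing introduced above: the heap-allocated argument is released in the free hook, which is also where a synchronous caller would be signalled. SBgTask, bgTaskExecute and SRtnArgSketch are illustrative names, not TDengine APIs, and the real scheduler runs the task on a worker thread.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int32_t (*run)(void *arg);   // does the work
  void    (*free)(void *arg);  // releases arg and signals any waiter
  void     *arg;
} SBgTask;

typedef struct {
  int sync;  // when set, a synchronous submitter is blocked until free() runs
  int now;
} SRtnArgSketch;

static int32_t rtnRun(void *arg) {
  SRtnArgSketch *p = arg;
  printf("retention at now=%d\n", p->now);
  return 0;
}

static void rtnFree(void *arg) {
  SRtnArgSketch *p = arg;
  if (p->sync) {
    printf("post the semaphore so the synchronous caller can continue\n");
  }
  free(p);
}

static int32_t bgTaskExecute(SBgTask *t) {
  int32_t code = t->run(t->arg);
  t->free(t->arg);  // always invoked, success or failure
  return code;
}

int main(void) {
  SRtnArgSketch *arg = calloc(1, sizeof(*arg));
  arg->sync = 1;
  arg->now = 42;
  SBgTask task = {.run = rtnRun, .free = rtnFree, .arg = arg};
  return bgTaskExecute(&task);
}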
@@ -975,6 +975,11 @@ int32_t tsdbSttFileWriteTombRecord(SSttFileWriter *writer, const STombRecord *re
_exit:
  if (code) {
    TSDB_ERROR_LOG(TD_VID(writer->config->tsdb->pVnode), lino, code);
  } else {
    tsdbTrace("vgId:%d write tomb record to stt file:%s, cid:%" PRId64 ", suid:%" PRId64 ", uid:%" PRId64
              ", version:%" PRId64,
              TD_VID(writer->config->tsdb->pVnode), writer->fd->path, writer->config->cid, record->suid, record->uid,
              record->version);
  }
  return code;
}

@@ -180,6 +180,8 @@ void initStateStoreAPI(SStateStore* pStore) {
  pStore->updateInfoIsUpdated = updateInfoIsUpdated;
  pStore->updateInfoIsTableInserted = updateInfoIsTableInserted;
  pStore->updateInfoDestroy = updateInfoDestroy;
  pStore->windowSBfDelete = windowSBfDelete;
  pStore->windowSBfAdd = windowSBfAdd;

  pStore->updateInfoInitP = updateInfoInitP;
  pStore->updateInfoAddCloseWindowSBF = updateInfoAddCloseWindowSBF;

@@ -476,8 +476,8 @@ void vnodeClose(SVnode *pVnode) {
  tsem_wait(&pVnode->canCommit);
  vnodeSyncClose(pVnode);
  vnodeQueryClose(pVnode);
  walClose(pVnode->pWal);
  tqClose(pVnode->pTq);
  walClose(pVnode->pWal);
  if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);
  smaClose(pVnode->pSma);
  if (pVnode->pMeta) metaClose(&pVnode->pMeta);

@@ -15,27 +15,8 @@

#include "vnd.h"

extern int32_t tsdbSyncRetention(STsdb *tsdb, int64_t now);
extern int32_t tsdbAsyncRetention(STsdb *tsdb, int64_t now, int64_t *taskid);
extern int32_t tsdbRetention(STsdb *tsdb, int64_t now, int32_t sync);

int32_t vnodeDoRetention(SVnode *pVnode, int64_t now) {
  int32_t code;
  int32_t lino;

  if (pVnode->config.sttTrigger == 1) {
    tsem_wait(&pVnode->canCommit);
    code = tsdbSyncRetention(pVnode->pTsdb, now);
    TSDB_CHECK_CODE(code, lino, _exit);

    // code = smaDoRetention(pVnode->pSma, now);
    // TSDB_CHECK_CODE(code, lino, _exit);
    tsem_post(&pVnode->canCommit);
  } else {
    int64_t taskid;
    code = tsdbAsyncRetention(pVnode->pTsdb, now, &taskid);
    TSDB_CHECK_CODE(code, lino, _exit);
  }

_exit:
  return code;
  return tsdbRetention(pVnode->pTsdb, now, pVnode->config.sttTrigger == 1);
}

@@ -624,6 +624,11 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
      // return tqProcessPollReq(pVnode->pTq, pMsg);
    case TDMT_VND_TMQ_VG_WALINFO:
      return tqProcessVgWalInfoReq(pVnode->pTq, pMsg);
    case TDMT_VND_TMQ_VG_COMMITTEDINFO:
      return tqProcessVgCommittedInfoReq(pVnode->pTq, pMsg);
    case TDMT_VND_TMQ_SEEK:
      return tqProcessSeekReq(pVnode->pTq, pMsg);

    default:
      vError("unknown msg type:%d in fetch queue", pMsg->msgType);
      return TSDB_CODE_APP_ERROR;

@@ -63,8 +63,8 @@ typedef struct {
  SSchemaWrapper* schema;
  char tbName[TSDB_TABLE_NAME_LEN];  // this is the current scan table: todo refactor
  int8_t recoverStep;
  bool recoverStep1Finished;
  bool recoverStep2Finished;
  // bool recoverStep1Finished;
  // bool recoverStep2Finished;
  int8_t recoverScanFinished;
  SQueryTableDataCond tableCond;
  SVersionRange fillHistoryVer;

@ -116,17 +116,6 @@ void resetTaskInfo(qTaskInfo_t tinfo) {
|
|||
clearStreamBlock(pTaskInfo->pRoot);
|
||||
}
|
||||
|
||||
void qResetStreamInfoTimeWindow(qTaskInfo_t tinfo) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*) tinfo;
|
||||
if (pTaskInfo == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
qDebug("%s set stream fill-history window:%" PRId64"-%"PRId64, GET_TASKID(pTaskInfo), INT64_MIN, INT64_MAX);
|
||||
pTaskInfo->streamInfo.fillHistoryWindow.skey = INT64_MIN;
|
||||
pTaskInfo->streamInfo.fillHistoryWindow.ekey = INT64_MAX;
|
||||
}
|
||||
|
||||
static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, const char* id) {
|
||||
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
|
||||
if (pOperator->numOfDownstream == 0) {
|
||||
|
@ -197,11 +186,6 @@ void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId) {
|
|||
doSetTaskId(pTaskInfo->pRoot, &pTaskInfo->storageAPI);
|
||||
}
|
||||
|
||||
//void qSetTaskCode(qTaskInfo_t tinfo, int32_t code) {
|
||||
// SExecTaskInfo* pTaskInfo = tinfo;
|
||||
// pTaskInfo->code = code;
|
||||
//}
|
||||
|
||||
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) {
|
||||
if (tinfo == NULL) {
|
||||
return TSDB_CODE_APP_ERROR;
|
||||
|
@ -341,6 +325,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t v
|
|||
return NULL;
|
||||
}
|
||||
|
||||
qStreamInfoResetTimewindowFilter(pTaskInfo);
|
||||
return pTaskInfo;
|
||||
}
|
||||
|
||||
|
@ -693,23 +678,33 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) {
|
|||
|
||||
*pRes = NULL;
|
||||
int64_t curOwner = 0;
|
||||
if ((curOwner = atomic_val_compare_exchange_64(&pTaskInfo->owner, 0, threadId)) != 0) {
|
||||
|
||||
// todo extract method
|
||||
taosRLockLatch(&pTaskInfo->lock);
|
||||
bool isKilled = isTaskKilled(pTaskInfo);
|
||||
if (isKilled) {
|
||||
clearStreamBlock(pTaskInfo->pRoot);
|
||||
qDebug("%s already killed, abort", GET_TASKID(pTaskInfo));
|
||||
|
||||
taosRUnLockLatch(&pTaskInfo->lock);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (pTaskInfo->owner != 0) {
|
||||
qError("%s-%p execTask is now executed by thread:%p", GET_TASKID(pTaskInfo), pTaskInfo, (void*)curOwner);
|
||||
pTaskInfo->code = TSDB_CODE_QRY_IN_EXEC;
|
||||
|
||||
taosRUnLockLatch(&pTaskInfo->lock);
|
||||
return pTaskInfo->code;
|
||||
}
|
||||
|
||||
pTaskInfo->owner = threadId;
|
||||
taosRUnLockLatch(&pTaskInfo->lock);
|
||||
|
||||
if (pTaskInfo->cost.start == 0) {
|
||||
pTaskInfo->cost.start = taosGetTimestampUs();
|
||||
}
|
||||
|
||||
if (isTaskKilled(pTaskInfo)) {
|
||||
clearStreamBlock(pTaskInfo->pRoot);
|
||||
atomic_store_64(&pTaskInfo->owner, 0);
|
||||
qDebug("%s already killed, abort", GET_TASKID(pTaskInfo));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// error occurs, record the error code and return to client
|
||||
int32_t ret = setjmp(pTaskInfo->env);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
|
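A self-contained POSIX-threads sketch of the ownership pattern the qExecTask change above moves to: check the kill flag and claim single-thread ownership of the task under one lock, instead of a bare atomic compare-and-swap. The real code uses a reader/writer latch (taosRLockLatch); a plain mutex is used here for brevity, and STaskSketch and task_try_claim are hypothetical names, not executor APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t lock;
  bool            killed;
  int64_t         owner;  // id of the thread currently executing, 0 if none
} STaskSketch;

// Returns true if the caller may execute the task; false if it is killed or already owned.
static bool task_try_claim(STaskSketch *t, int64_t tid) {
  bool ok = false;
  pthread_mutex_lock(&t->lock);
  if (!t->killed && t->owner == 0) {
    t->owner = tid;
    ok = true;
  }
  pthread_mutex_unlock(&t->lock);
  return ok;
}

int main(void) {
  STaskSketch t = {.killed = false, .owner = 0};
  pthread_mutex_init(&t.lock, NULL);
  printf("claimed: %d\n", task_try_claim(&t, 1));  // 1: first caller wins
  printf("claimed: %d\n", task_try_claim(&t, 2));  // 0: already owned
  pthread_mutex_destroy(&t.lock);
  return 0;
}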
@ -813,11 +808,13 @@ int32_t qKillTask(qTaskInfo_t tinfo, int32_t rspCode) {
|
|||
qDebug("%s sync killed execTask", GET_TASKID(pTaskInfo));
|
||||
setTaskKilled(pTaskInfo, TSDB_CODE_TSC_QUERY_KILLED);
|
||||
|
||||
taosWLockLatch(&pTaskInfo->lock);
|
||||
while (qTaskIsExecuting(pTaskInfo)) {
|
||||
taosMsleep(10);
|
||||
}
|
||||
|
||||
pTaskInfo->code = rspCode;
|
||||
taosWUnLockLatch(&pTaskInfo->lock);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -921,8 +918,6 @@ int32_t qStreamSourceScanParamForHistoryScanStep1(qTaskInfo_t tinfo, SVersionRan
|
|||
pStreamInfo->fillHistoryVer = *pVerRange;
|
||||
pStreamInfo->fillHistoryWindow = *pWindow;
|
||||
pStreamInfo->recoverStep = STREAM_RECOVER_STEP__PREPARE1;
|
||||
pStreamInfo->recoverStep1Finished = false;
|
||||
pStreamInfo->recoverStep2Finished = false;
|
||||
|
||||
qDebug("%s step 1. set param for stream scanner for scan-history data, verRange:%" PRId64 " - %" PRId64 ", window:%" PRId64
|
||||
" - %" PRId64,
|
||||
|
@ -940,8 +935,6 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan
|
|||
pStreamInfo->fillHistoryVer = *pVerRange;
|
||||
pStreamInfo->fillHistoryWindow = *pWindow;
|
||||
pStreamInfo->recoverStep = STREAM_RECOVER_STEP__PREPARE2;
|
||||
pStreamInfo->recoverStep1Finished = true;
|
||||
pStreamInfo->recoverStep2Finished = false;
|
||||
|
||||
qDebug("%s step 2. set param for stream scanner for scan-history data, verRange:%" PRId64 " - %" PRId64
|
||||
", window:%" PRId64 " - %" PRId64,
|
||||
|
@ -1080,23 +1073,15 @@ bool qStreamRecoverScanFinished(qTaskInfo_t tinfo) {
|
|||
return pTaskInfo->streamInfo.recoverScanFinished;
|
||||
}
|
||||
|
||||
bool qStreamRecoverScanStep1Finished(qTaskInfo_t tinfo) {
|
||||
int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
return pTaskInfo->streamInfo.recoverStep1Finished;
|
||||
}
|
||||
STimeWindow* pWindow = &pTaskInfo->streamInfo.fillHistoryWindow;
|
||||
|
||||
bool qStreamRecoverScanStep2Finished(qTaskInfo_t tinfo) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
return pTaskInfo->streamInfo.recoverStep2Finished;
|
||||
}
|
||||
qDebug("%s set remove scan-history filter window:%" PRId64 "-%" PRId64 ", new window:%" PRId64 "-%" PRId64,
|
||||
GET_TASKID(pTaskInfo), pWindow->skey, pWindow->ekey, INT64_MIN, INT64_MAX);
|
||||
|
||||
int32_t qStreamRecoverSetAllStepFinished(qTaskInfo_t tinfo) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
pTaskInfo->streamInfo.recoverStep1Finished = true;
|
||||
pTaskInfo->streamInfo.recoverStep2Finished = true;
|
||||
|
||||
// reset the time window
|
||||
pTaskInfo->streamInfo.fillHistoryWindow.skey = INT64_MIN;
|
||||
pWindow->skey = INT64_MIN;
|
||||
pWindow->ekey = INT64_MAX;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1650,10 +1650,99 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock, bool filter) {
|
||||
static void doBlockDataWindowFilter(SSDataBlock* pBlock, int32_t tsIndex, STimeWindow* pWindow, const char* id) {
|
||||
if (pWindow->skey != INT64_MIN || pWindow->ekey != INT64_MAX) {
|
||||
bool* p = taosMemoryCalloc(pBlock->info.rows, sizeof(bool));
|
||||
bool hasUnqualified = false;
|
||||
|
||||
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, tsIndex);
|
||||
|
||||
if (pWindow->skey != INT64_MIN) {
|
||||
qDebug("%s filter for additional history window, skey:%" PRId64, id, pWindow->skey);
|
||||
|
||||
ASSERT(pCol->pData != NULL);
|
||||
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
|
||||
int64_t* ts = (int64_t*)colDataGetData(pCol, i);
|
||||
p[i] = (*ts >= pWindow->skey);
|
||||
|
||||
if (!p[i]) {
|
||||
hasUnqualified = true;
|
||||
}
|
||||
}
|
||||
} else if (pWindow->ekey != INT64_MAX) {
|
||||
qDebug("%s filter for additional history window, ekey:%" PRId64, id, pWindow->ekey);
|
||||
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
|
||||
int64_t* ts = (int64_t*)colDataGetData(pCol, i);
|
||||
p[i] = (*ts <= pWindow->ekey);
|
||||
|
||||
if (!p[i]) {
|
||||
hasUnqualified = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (hasUnqualified) {
|
||||
trimDataBlock(pBlock, pBlock->info.rows, p);
|
||||
}
|
||||
|
||||
taosMemoryFree(p);
|
||||
}
|
||||
}
|
||||
|
||||
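A self-contained sketch of the row-filtering idea in doBlockDataWindowFilter above: build a keep-mask from the timestamp column against the query window, then compact the surviving rows, much as trimDataBlock does on a real SSDataBlock. filter_rows_by_window and the plain int64_t array are illustrative simplifications, not executor code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Keeps only rows whose timestamp falls inside [skey, ekey]; returns the new row count.
static int filter_rows_by_window(int64_t *ts, int nrows, int64_t skey, int64_t ekey) {
  bool *keep = calloc(nrows, sizeof(bool));
  int   kept = 0;
  for (int i = 0; i < nrows; ++i) {
    keep[i] = (ts[i] >= skey && ts[i] <= ekey);
  }
  for (int i = 0; i < nrows; ++i) {  // in-place compaction of the kept rows
    if (keep[i]) ts[kept++] = ts[i];
  }
  free(keep);
  return kept;
}

int main(void) {
  int64_t ts[] = {5, 10, 15, 20, 25};
  int     n = filter_rows_by_window(ts, 5, 10, 20);
  for (int i = 0; i < n; ++i) printf("%lld ", (long long)ts[i]);  // prints: 10 15 20
  printf("\n");
  return 0;
}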
// re-build the delete block, ONLY according to the split timestamp
|
||||
static void rebuildDeleteBlockData(SSDataBlock* pBlock, STimeWindow* pWindow, const char* id) {
|
||||
int32_t numOfRows = pBlock->info.rows;
|
||||
bool* p = taosMemoryCalloc(numOfRows, sizeof(bool));
|
||||
bool hasUnqualified = false;
|
||||
int64_t skey = pWindow->skey;
|
||||
int64_t ekey = pWindow->ekey;
|
||||
|
||||
SColumnInfoData* pSrcStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
|
||||
uint64_t* tsStartCol = (uint64_t*)pSrcStartCol->pData;
|
||||
SColumnInfoData* pSrcEndCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
|
||||
uint64_t* tsEndCol = (uint64_t*)pSrcEndCol->pData;
|
||||
|
||||
if (pWindow->skey != INT64_MIN) {
|
||||
for (int32_t i = 0; i < numOfRows; i++) {
|
||||
if (tsStartCol[i] < skey) {
|
||||
tsStartCol[i] = skey;
|
||||
}
|
||||
|
||||
if (tsEndCol[i] >= skey) {
|
||||
p[i] = true;
|
||||
} else { // this row should be removed, since it is not in this query time window, which is [skey, INT64_MAX]
|
||||
hasUnqualified = true;
|
||||
}
|
||||
}
|
||||
} else if (pWindow->ekey != INT64_MAX) {
|
||||
for(int32_t i = 0; i < numOfRows; ++i) {
|
||||
if (tsEndCol[i] > ekey) {
|
||||
tsEndCol[i] = ekey;
|
||||
}
|
||||
|
||||
if (tsStartCol[i] <= ekey) {
|
||||
p[i] = true;
|
||||
} else {
|
||||
hasUnqualified = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (hasUnqualified) {
|
||||
trimDataBlock(pBlock, pBlock->info.rows, p);
|
||||
qDebug("%s re-build delete datablock, start key revised to:%"PRId64", rows:%"PRId64, id, skey, pBlock->info.rows);
|
||||
} else {
|
||||
qDebug("%s not update the delete block", id);
|
||||
}
|
||||
|
||||
taosMemoryFree(p);
|
||||
}
|
||||
|
||||
static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock, STimeWindow* pTimeWindow, bool filter) {
|
||||
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
|
||||
SOperatorInfo* pOperator = pInfo->pStreamScanOp;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
const char* id = GET_TASKID(pTaskInfo);
|
||||
|
||||
blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);
|
||||
|
||||
|
@ -1693,7 +1782,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
|
|||
// currently only the tbname pseudo column
|
||||
if (pInfo->numOfPseudoExpr > 0) {
|
||||
int32_t code = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes,
|
||||
pBlockInfo->rows, GET_TASKID(pTaskInfo), &pTableScanInfo->base.metaCache);
|
||||
pBlockInfo->rows, id, &pTableScanInfo->base.metaCache);
|
||||
// ignore the table not exists error, since this table may have been dropped during the scan procedure.
|
||||
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_PAR_TABLE_NOT_EXIST) {
|
||||
blockDataFreeRes((SSDataBlock*)pBlock);
|
||||
|
@ -1708,8 +1797,14 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
|
|||
doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
}
|
||||
|
||||
// filter the block extracted from WAL files, according to the time window apply additional time window filter
|
||||
doBlockDataWindowFilter(pInfo->pRes, pInfo->primaryTsIndex, pTimeWindow, id);
|
||||
pInfo->pRes->info.dataLoad = 1;
|
||||
|
||||
blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex);
|
||||
if (pInfo->pRes->info.rows == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
calBlockTbName(pInfo, pInfo->pRes);
|
||||
return 0;
|
||||
|
@ -1766,7 +1861,8 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
|
|||
qDebug("doQueueScan get data from log %" PRId64 " rows, version:%" PRId64, pRes->info.rows,
|
||||
pTaskInfo->streamInfo.currentOffset.version);
|
||||
blockDataCleanup(pInfo->pRes);
|
||||
setBlockIntoRes(pInfo, pRes, true);
|
||||
STimeWindow defaultWindow = {.skey = INT64_MIN, .ekey = INT64_MAX};
|
||||
setBlockIntoRes(pInfo, pRes, &defaultWindow, true);
|
||||
if (pInfo->pRes->info.rows > 0) {
|
||||
return pInfo->pRes;
|
||||
}
|
||||
|
@ -1875,80 +1971,6 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo)
|
|||
}
|
||||
}
|
||||
|
||||
static void doBlockDataWindowFilter(SSDataBlock* pBlock, int32_t tsIndex, STimeWindow* pWindow, const char* id) {
|
||||
if (pWindow->skey != INT64_MIN || pWindow->ekey != INT64_MAX) {
|
||||
bool* p = taosMemoryCalloc(pBlock->info.rows, sizeof(bool));
|
||||
bool hasUnqualified = false;
|
||||
|
||||
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, tsIndex);
|
||||
|
||||
if (pWindow->skey != INT64_MIN) {
|
||||
qDebug("%s filter for additional history window, skey:%" PRId64, id, pWindow->skey);
|
||||
|
||||
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
|
||||
int64_t* ts = (int64_t*)colDataGetData(pCol, i);
|
||||
p[i] = (*ts >= pWindow->skey);
|
||||
|
||||
if (!p[i]) {
|
||||
hasUnqualified = true;
|
||||
}
|
||||
}
|
||||
} else if (pWindow->ekey != INT64_MAX) {
|
||||
qDebug("%s filter for additional history window, ekey:%" PRId64, id, pWindow->ekey);
|
||||
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
|
||||
int64_t* ts = (int64_t*)colDataGetData(pCol, i);
|
||||
p[i] = (*ts <= pWindow->ekey);
|
||||
|
||||
if (!p[i]) {
|
||||
hasUnqualified = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (hasUnqualified) {
|
||||
trimDataBlock(pBlock, pBlock->info.rows, p);
|
||||
}
|
||||
|
||||
taosMemoryFree(p);
|
||||
}
|
||||
}
|
||||
|
||||
// re-build the delete block, ONLY according to the split timestamp
|
||||
static void rebuildDeleteBlockData(SSDataBlock* pBlock, int64_t skey, const char* id) {
|
||||
if (skey == INT64_MIN) {
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t numOfRows = pBlock->info.rows;
|
||||
|
||||
bool* p = taosMemoryCalloc(numOfRows, sizeof(bool));
|
||||
bool hasUnqualified = false;
|
||||
|
||||
SColumnInfoData* pSrcStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
|
||||
uint64_t* tsStartCol = (uint64_t*)pSrcStartCol->pData;
|
||||
SColumnInfoData* pSrcEndCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
|
||||
uint64_t* tsEndCol = (uint64_t*)pSrcEndCol->pData;
|
||||
|
||||
for (int32_t i = 0; i < numOfRows; i++) {
|
||||
if (tsStartCol[i] < skey) {
|
||||
tsStartCol[i] = skey;
|
||||
}
|
||||
|
||||
if (tsEndCol[i] >= skey) {
|
||||
p[i] = true;
|
||||
} else { // this row should be removed, since it is not in this query time window, which is [skey, INT64_MAX]
|
||||
hasUnqualified = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (hasUnqualified) {
|
||||
trimDataBlock(pBlock, pBlock->info.rows, p);
|
||||
}
|
||||
|
||||
qDebug("%s re-build delete datablock, start key revised to:%"PRId64", rows:%"PRId64, id, skey, pBlock->info.rows);
|
||||
taosMemoryFree(p);
|
||||
}
|
||||
|
||||
static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
|
||||
// NOTE: this operator does never check if current status is done or not
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
@ -2121,7 +2143,7 @@ FETCH_NEXT_BLOCK:
|
|||
}
|
||||
|
||||
setBlockGroupIdByUid(pInfo, pDelBlock);
|
||||
rebuildDeleteBlockData(pDelBlock, pStreamInfo->fillHistoryWindow.skey, id);
|
||||
rebuildDeleteBlockData(pDelBlock, &pStreamInfo->fillHistoryWindow, id);
|
||||
printDataBlock(pDelBlock, "stream scan delete recv filtered");
|
||||
if (pDelBlock->info.rows == 0) {
|
||||
if (pInfo->tqReader) {
|
||||
|
@ -2221,8 +2243,7 @@ FETCH_NEXT_BLOCK:
|
|||
return pInfo->pUpdateRes;
|
||||
}
|
||||
|
||||
SSDataBlock* pBlock = pInfo->pRes;
|
||||
SDataBlockInfo* pBlockInfo = &pBlock->info;
|
||||
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
|
||||
int32_t totalBlocks = taosArrayGetSize(pInfo->pBlockLists);
|
||||
|
||||
NEXT_SUBMIT_BLK:
|
||||
|
@ -2246,21 +2267,23 @@ FETCH_NEXT_BLOCK:
|
|||
}
|
||||
}
|
||||
|
||||
blockDataCleanup(pBlock);
|
||||
blockDataCleanup(pInfo->pRes);
|
||||
|
||||
while (pAPI->tqReaderFn.tqNextBlockImpl(pInfo->tqReader, id)) {
|
||||
SSDataBlock* pRes = NULL;
|
||||
|
||||
int32_t code = pAPI->tqReaderFn.tqRetrieveBlock(pInfo->tqReader, &pRes, id);
|
||||
qDebug("retrieve data from submit completed code:%s, rows:%" PRId64 " %s", tstrerror(code), pRes->info.rows,
|
||||
id);
|
||||
qDebug("retrieve data from submit completed code:%s rows:%" PRId64 " %s", tstrerror(code), pRes->info.rows, id);
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS || pRes->info.rows == 0) {
|
||||
qDebug("retrieve data failed, try next block in submit block, %s", id);
|
||||
continue;
|
||||
}
|
||||
|
||||
setBlockIntoRes(pInfo, pRes, false);
|
||||
setBlockIntoRes(pInfo, pRes, &pStreamInfo->fillHistoryWindow, false);
|
||||
if (pInfo->pRes->info.rows == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pInfo->pCreateTbRes->info.rows > 0) {
|
||||
pInfo->scanMode = STREAM_SCAN_FROM_RES;
|
||||
|
@ -2268,13 +2291,8 @@ FETCH_NEXT_BLOCK:
|
|||
return pInfo->pCreateTbRes;
|
||||
}
|
||||
|
||||
// apply additional time window filter
|
||||
doBlockDataWindowFilter(pBlock, pInfo->primaryTsIndex, &pStreamInfo->fillHistoryWindow, id);
|
||||
pBlock->info.dataLoad = 1;
|
||||
blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);
|
||||
|
||||
doCheckUpdate(pInfo, pBlockInfo->window.ekey, pBlock);
|
||||
doFilter(pBlock, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
doCheckUpdate(pInfo, pBlockInfo->window.ekey, pInfo->pRes);
|
||||
doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
|
||||
int64_t numOfUpdateRes = pInfo->pUpdateDataRes->info.rows;
|
||||
qDebug("%s %" PRId64 " rows in datablock, update res:%" PRId64, id, pBlockInfo->rows, numOfUpdateRes);
|
||||
|
@ -2296,7 +2314,7 @@ FETCH_NEXT_BLOCK:
|
|||
|
||||
qDebug("stream scan completed, and return source rows:%" PRId64", %s", pBlockInfo->rows, id);
|
||||
if (pBlockInfo->rows > 0) {
|
||||
return pBlock;
|
||||
return pInfo->pRes;
|
||||
}
|
||||
|
||||
if (pInfo->pUpdateDataRes->info.rows > 0) {
|
||||
|
@ -2506,7 +2524,9 @@ void streamScanReloadState(SOperatorInfo* pOperator) {
|
|||
pInfo->stateStore.updateInfoDestroy(pInfo->pUpdateInfo);
|
||||
pInfo->pUpdateInfo = pUpInfo;
|
||||
} else {
|
||||
pInfo->pUpdateInfo->minTS = TMAX(pInfo->pUpdateInfo->minTS, pUpInfo->minTS);
|
||||
pInfo->stateStore.windowSBfDelete(pInfo->pUpdateInfo, 1);
|
||||
pInfo->stateStore.windowSBfAdd(pInfo->pUpdateInfo, 1);
|
||||
ASSERT(pInfo->pUpdateInfo->minTS > pUpInfo->minTS);
|
||||
pInfo->pUpdateInfo->maxDataVersion = TMAX(pInfo->pUpdateInfo->maxDataVersion, pUpInfo->maxDataVersion);
|
||||
SHashObj* curMap = pInfo->pUpdateInfo->pMap;
|
||||
void *pIte = taosHashIterate(curMap, NULL);
|
||||
|
@ -2687,7 +2707,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
|
|||
pInfo->igCheckUpdate = pTableScanNode->igCheckUpdate;
|
||||
pInfo->igExpired = pTableScanNode->igExpired;
|
||||
pInfo->twAggSup.maxTs = INT64_MIN;
|
||||
pInfo->pState = NULL;
|
||||
pInfo->pState = pTaskInfo->streamInfo.pState;
|
||||
pInfo->stateStore = pTaskInfo->storageAPI.stateStore;
|
||||
pInfo->readerFn = pTaskInfo->storageAPI.tqReaderFn;
|
||||
|
||||
|
|
|
@ -3736,7 +3736,6 @@ void streamSessionReloadState(SOperatorInfo* pOperator) {
|
|||
setSessionOutputBuf(pAggSup, pSeKeyBuf[i].win.skey, pSeKeyBuf[i].win.ekey, pSeKeyBuf[i].groupId, &winInfo);
|
||||
int32_t winNum = compactSessionWindow(pOperator, &winInfo, pInfo->pStUpdated, pInfo->pStDeleted, true);
|
||||
if (winNum > 0) {
|
||||
saveSessionOutputBuf(pAggSup, &winInfo);
|
||||
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
|
||||
saveResult(winInfo, pInfo->pStUpdated);
|
||||
} else if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
|
||||
|
@ -3747,9 +3746,8 @@ void streamSessionReloadState(SOperatorInfo* pOperator) {
|
|||
getSessionHashKey(&winInfo.sessionWin, &key);
|
||||
tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &winInfo, sizeof(SResultWindowInfo));
|
||||
}
|
||||
} else {
|
||||
releaseOutputBuf(pAggSup->pState, NULL, (SResultRow*)winInfo.pOutputBuf, &pAggSup->stateStore);
|
||||
}
|
||||
saveSessionOutputBuf(pAggSup, &winInfo);
|
||||
}
|
||||
taosMemoryFree(pBuf);
|
||||
|
||||
|
@ -4059,13 +4057,20 @@ bool isEqualStateKey(SStateWindowInfo* pWin, char* pKeyData) {
|
|||
|
||||
bool compareStateKey(void* data, void* key) {
|
||||
if (!data || !key) {
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
SStateKeys* stateKey = (SStateKeys*)key;
|
||||
stateKey->pData = (char*)key + sizeof(SStateKeys);
|
||||
return compareVal(data, stateKey);
|
||||
}
|
||||
|
||||
bool compareWinStateKey(SStateKeys* left, SStateKeys* right) {
|
||||
if (!left || !right) {
|
||||
return false;
|
||||
}
|
||||
return compareVal(left->pData, right);
|
||||
}
|
||||
|
||||
void setStateOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t groupId, char* pKeyData,
|
||||
SStateWindowInfo* pCurWin, SStateWindowInfo* pNextWin) {
|
||||
int32_t size = pAggSup->resultRowSize;
|
||||
|
@ -4088,10 +4093,14 @@ void setStateOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t groupId,
|
|||
pCurWin->winInfo.pOutputBuf = taosMemoryCalloc(1, size);
|
||||
pCurWin->pStateKey =
|
||||
(SStateKeys*)((char*)pCurWin->winInfo.pOutputBuf + (pAggSup->resultRowSize - pAggSup->stateKeySize));
|
||||
pCurWin->pStateKey->bytes = pAggSup->stateKeySize - sizeof(SStateKeys);
|
||||
pCurWin->pStateKey->type = pAggSup->stateKeyType;
|
||||
pCurWin->pStateKey->pData = (char*)pCurWin->pStateKey + sizeof(SStateKeys);
|
||||
pCurWin->pStateKey->isNull = false;
|
||||
pCurWin->pStateKey->bytes = pAggSup->stateKeySize - sizeof(SStateKeys);
|
||||
pCurWin->pStateKey->type = pAggSup->stateKeyType;
|
||||
pCurWin->pStateKey->pData = (char*)pCurWin->pStateKey + sizeof(SStateKeys);
|
||||
pCurWin->pStateKey->isNull = false;
|
||||
pCurWin->winInfo.sessionWin.groupId = groupId;
|
||||
pCurWin->winInfo.sessionWin.win.skey = ts;
|
||||
pCurWin->winInfo.sessionWin.win.ekey = ts;
|
||||
qDebug("===stream===reset state win key. skey:%" PRId64 ", endkey:%" PRId64, pCurWin->winInfo.sessionWin.win.skey, pCurWin->winInfo.sessionWin.win.ekey);
|
||||
}
|
||||
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
|
@ -4243,6 +4252,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
|
|||
SExprSupp* pSup = &pOperator->exprSupp;
|
||||
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
|
||||
SOptrBasicInfo* pBInfo = &pInfo->binfo;
|
||||
qDebug("===stream=== stream state agg");
|
||||
if (pOperator->status == OP_RES_TO_RETURN) {
|
||||
doBuildDeleteDataBlock(pOperator, pInfo->pSeDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
|
||||
if (pInfo->pDelRes->info.rows > 0) {
|
||||
|
@ -4342,6 +4352,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
|
|||
void streamStateReleaseState(SOperatorInfo* pOperator) {
|
||||
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
|
||||
int32_t resSize = taosArrayGetSize(pInfo->historyWins) * sizeof(SSessionKey);
|
||||
qDebug("===stream=== relase state. save result count:%d", (int32_t)taosArrayGetSize(pInfo->historyWins));
|
||||
pInfo->streamAggSup.stateStore.streamStateSaveInfo(pInfo->streamAggSup.pState, STREAM_STATE_OP_STATE_NAME, strlen(STREAM_STATE_OP_STATE_NAME), pInfo->historyWins->pData, resSize);
|
||||
SOperatorInfo* downstream = pOperator->pDownstream[0];
|
||||
if (downstream->fpSet.releaseStreamStateFn) {
|
||||
|
@ -4367,6 +4378,7 @@ static void compactStateWindow(SOperatorInfo* pOperator, SResultWindowInfo* pCur
|
|||
compactFunctions(pSup->pCtx, pAggSup->pDummyCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
|
||||
tSimpleHashRemove(pStUpdated, &pNextWin->sessionWin, sizeof(SSessionKey));
|
||||
if (pNextWin->isOutput && pStDeleted) {
|
||||
qDebug("===stream=== save delete window info %" PRId64 ", %" PRIu64, pNextWin->sessionWin.win.skey, pNextWin->sessionWin.groupId);
|
||||
saveDeleteRes(pStDeleted, pNextWin->sessionWin);
|
||||
}
|
||||
removeSessionResult(pStUpdated, pAggSup->pResultRows, pNextWin->sessionWin);
|
||||
|
@ -4385,20 +4397,28 @@ void streamStateReloadState(SOperatorInfo* pOperator) {
|
|||
int32_t code = pAggSup->stateStore.streamStateGetInfo(pAggSup->pState, STREAM_STATE_OP_STATE_NAME,
|
||||
strlen(STREAM_STATE_OP_STATE_NAME), &pBuf, &size);
|
||||
int32_t num = size / sizeof(SSessionKey);
|
||||
qDebug("===stream=== reload state. get result count:%d", num);
|
||||
SSessionKey* pSeKeyBuf = (SSessionKey*) pBuf;
|
||||
ASSERT(size == num * sizeof(SSessionKey));
|
||||
if (!pInfo->pSeUpdated && num > 0) {
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
pInfo->pSeUpdated = tSimpleHashInit(64, hashFn);
|
||||
}
|
||||
if (!pInfo->pSeDeleted && num > 0) {
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
pInfo->pSeDeleted = tSimpleHashInit(64, hashFn);
|
||||
}
|
||||
for (int32_t i = 0; i < num; i++) {
|
||||
SStateWindowInfo curInfo = {0};
|
||||
SStateWindowInfo nextInfo = {0};
|
||||
SStateWindowInfo dummy = {0};
|
||||
qDebug("===stream=== reload state. try process result %" PRId64 ", %" PRIu64 ", index:%d", pSeKeyBuf[i].win.skey, pSeKeyBuf[i].groupId, i);
|
||||
setStateOutputBuf(pAggSup, pSeKeyBuf[i].win.skey, pSeKeyBuf[i].groupId, NULL, &curInfo, &nextInfo);
|
||||
if (compareStateKey(curInfo.pStateKey,nextInfo.pStateKey)) {
|
||||
compactStateWindow(pOperator, &curInfo.winInfo, &nextInfo.winInfo, pInfo->pSeUpdated, pInfo->pSeUpdated);
|
||||
saveSessionOutputBuf(pAggSup, &curInfo.winInfo);
|
||||
bool cpRes = compareWinStateKey(curInfo.pStateKey,nextInfo.pStateKey);
|
||||
qDebug("===stream=== reload state. next window info %" PRId64 ", %" PRIu64 ", compare:%d", nextInfo.winInfo.sessionWin.win.skey, nextInfo.winInfo.sessionWin.groupId, cpRes);
|
||||
if (cpRes) {
|
||||
compactStateWindow(pOperator, &curInfo.winInfo, &nextInfo.winInfo, pInfo->pSeUpdated, pInfo->pSeDeleted);
|
||||
qDebug("===stream=== reload state. save result %" PRId64 ", %" PRIu64, curInfo.winInfo.sessionWin.win.skey, curInfo.winInfo.sessionWin.groupId);
|
||||
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
|
||||
saveResult(curInfo.winInfo, pInfo->pSeUpdated);
|
||||
} else if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
|
||||
|
@ -4409,14 +4429,12 @@ void streamStateReloadState(SOperatorInfo* pOperator) {
|
|||
getSessionHashKey(&curInfo.winInfo.sessionWin, &key);
|
||||
tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curInfo.winInfo, sizeof(SResultWindowInfo));
|
||||
}
|
||||
} else if (IS_VALID_SESSION_WIN(nextInfo.winInfo)) {
|
||||
releaseOutputBuf(pAggSup->pState, NULL, (SResultRow*)nextInfo.winInfo.pOutputBuf, &pAggSup->pSessionAPI->stateStore);
|
||||
}
|
||||
|
||||
if (IS_VALID_SESSION_WIN(curInfo.winInfo)) {
|
||||
releaseOutputBuf(pAggSup->pState, NULL, (SResultRow*)curInfo.winInfo.pOutputBuf, &pAggSup->pSessionAPI->stateStore);
|
||||
}
|
||||
|
||||
if (IS_VALID_SESSION_WIN(nextInfo.winInfo)) {
|
||||
releaseOutputBuf(pAggSup->pState, NULL, (SResultRow*)nextInfo.winInfo.pOutputBuf, &pAggSup->pSessionAPI->stateStore);
|
||||
saveSessionOutputBuf(pAggSup, &curInfo.winInfo);
|
||||
}
|
||||
}
|
||||
taosMemoryFree(pBuf);
|
||||
|
|
|
@@ -118,6 +118,12 @@ int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes);
void destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request);
SNode* createSelectStmtImpl(bool isDistinct, SNodeList* pProjectionList, SNode* pTable);

/**
 * @brief return a - b with overflow check
 * @retval val range between [INT64_MIN, INT64_MAX]
 */
int64_t int64SafeSub(int64_t a, int64_t b);

#ifdef __cplusplus
}
#endif

@ -3297,23 +3297,25 @@ static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode*
|
|||
if (NULL == pInterval) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int64_t timeRange = TABS(pFill->timeRange.skey - pFill->timeRange.ekey);
|
||||
int64_t timeRange = 0;
|
||||
int64_t intervalRange = 0;
|
||||
if (IS_CALENDAR_TIME_DURATION(pInterval->unit)) {
|
||||
int64_t f = 1;
|
||||
if (pInterval->unit == 'n') {
|
||||
f = 30LL * MILLISECOND_PER_DAY;
|
||||
} else if (pInterval->unit == 'y') {
|
||||
f = 365LL * MILLISECOND_PER_DAY;
|
||||
if (!pCxt->createStream) {
|
||||
int64_t res = int64SafeSub(pFill->timeRange.skey, pFill->timeRange.ekey);
|
||||
timeRange = res < 0 ? res == INT64_MIN ? INT64_MAX : -res : res;
|
||||
if (IS_CALENDAR_TIME_DURATION(pInterval->unit)) {
|
||||
int64_t f = 1;
|
||||
if (pInterval->unit == 'n') {
|
||||
f = 30LL * MILLISECOND_PER_DAY;
|
||||
} else if (pInterval->unit == 'y') {
|
||||
f = 365LL * MILLISECOND_PER_DAY;
|
||||
}
|
||||
intervalRange = pInterval->datum.i * f;
|
||||
} else {
|
||||
intervalRange = pInterval->datum.i;
|
||||
}
|
||||
if ((timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) {
|
||||
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE);
|
||||
}
|
||||
intervalRange = pInterval->datum.i * f;
|
||||
} else {
|
||||
intervalRange = pInterval->datum.i;
|
||||
}
|
||||
|
||||
if ((timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) {
|
||||
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
|
|
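A standalone sketch of the guard applied in checkFill above: estimate the interval length (calendar units 'n' and 'y' are approximated in milliseconds, as with MILLISECOND_PER_DAY in the diff) and reject a fill range that would produce too many windows. MS_PER_DAY and MAX_FILL_WINDOWS below are illustrative stand-ins for the engine's constants, not its actual values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MS_PER_DAY       86400000LL
#define MAX_FILL_WINDOWS 1000000

static bool fillRangeTooLarge(int64_t skey, int64_t ekey, int64_t interval, char unit) {
  int64_t timeRange = ekey > skey ? ekey - skey : skey - ekey;  // absolute span of the fill range
  int64_t intervalRange = interval;                             // non-calendar units arrive as raw durations
  if (unit == 'n') intervalRange = interval * 30LL * MS_PER_DAY;        // ~1 month
  else if (unit == 'y') intervalRange = interval * 365LL * MS_PER_DAY;  // ~1 year
  return (timeRange / intervalRange) >= MAX_FILL_WINDOWS;
}

int main(void) {
  // one year of data at a 1-millisecond interval: far too many windows
  printf("%d\n", fillRangeTooLarge(0, 365LL * MS_PER_DAY, 1, 'a'));
  // the same range at a 1-day interval (passed as a raw millisecond duration) is fine
  printf("%d\n", fillRangeTooLarge(0, 365LL * MS_PER_DAY, MS_PER_DAY, 'd'));
  return 0;
}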
@@ -1142,3 +1142,18 @@ void destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request) {
  taosHashCleanup(pMetaCache->pTableIndex);
  taosHashCleanup(pMetaCache->pTableCfg);
}

int64_t int64SafeSub(int64_t a, int64_t b) {
  int64_t res = (uint64_t)a - (uint64_t)b;

  if (a >= 0 && b < 0) {
    if ((uint64_t)res > (uint64_t)INT64_MAX) {
      // overflow
      res = INT64_MAX;
    }
  } else if (a < 0 && b > 0 && res >= 0) {
    // underflow
    res = INT64_MIN;
  }
  return res;
}

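A standalone check of the saturating-subtraction idea behind int64SafeSub above; safe_sub_i64 below is a local stand-in with the same clamping behaviour, not the parser's function, and it assumes the usual two's-complement conversion that the code above also relies on.

#include <assert.h>
#include <stdint.h>

static int64_t safe_sub_i64(int64_t a, int64_t b) {
  int64_t res = (int64_t)((uint64_t)a - (uint64_t)b);  // wraparound-safe in unsigned space
  if (a >= 0 && b < 0 && res < 0) return INT64_MAX;    // positive overflow clamps high
  if (a < 0 && b > 0 && res >= 0) return INT64_MIN;    // negative overflow clamps low
  return res;
}

int main(void) {
  assert(safe_sub_i64(10, 3) == 7);
  assert(safe_sub_i64(INT64_MAX, -1) == INT64_MAX);  // would overflow, clamped
  assert(safe_sub_i64(INT64_MIN, 1) == INT64_MIN);   // would underflow, clamped
  return 0;
}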
@@ -62,6 +62,7 @@ SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem*

int32_t streamAddEndScanHistoryMsg(SStreamTask* pTask, SRpcHandleInfo* pRpcInfo, SStreamScanHistoryFinishReq* pReq);
int32_t streamNotifyUpstreamContinue(SStreamTask* pTask);
int32_t streamTaskFillHistoryFinished(SStreamTask* pTask);

extern int32_t streamBackendId;
extern int32_t streamBackendCfWrapperId;

@ -379,7 +379,7 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
qDebug("s-task:%s data block enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size);
|
||||
qDebug("s-task:%s blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size);
|
||||
int32_t code = taosWriteQitem(pTask->inputQueue->queue, pItem);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
destroyStreamDataBlock((SStreamDataBlock*) pItem);
|
||||
|
|
|
@ -162,23 +162,28 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
|
||||
int32_t code = 0;
|
||||
|
||||
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize) {
|
||||
ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE);
|
||||
void* exec = pTask->exec.pExecutor;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
void* exec = pTask->exec.pExecutor;
|
||||
bool finished = false;
|
||||
|
||||
qSetStreamOpOpen(exec);
|
||||
bool finished = false;
|
||||
|
||||
while (1) {
|
||||
while (!finished) {
|
||||
if (streamTaskShouldPause(&pTask->status)) {
|
||||
double el = (taosGetTimestampMs() - pTask->tsInfo.step1Start) / 1000.0;
|
||||
qDebug("s-task:%s paused from the scan-history task, elapsed time:%.2fsec", pTask->id.idStr, el);
|
||||
break;
|
||||
}
|
||||
|
||||
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
|
||||
if (pRes == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t batchCnt = 0;
|
||||
int32_t numOfBlocks = 0;
|
||||
while (1) {
|
||||
if (streamTaskShouldStop(&pTask->status)) {
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
|
@ -187,34 +192,15 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
|
|||
|
||||
SSDataBlock* output = NULL;
|
||||
uint64_t ts = 0;
|
||||
if (qExecTask(exec, &output, &ts) < 0) {
|
||||
code = qExecTask(exec, &output, &ts);
|
||||
if (code != TSDB_CODE_TSC_QUERY_KILLED && code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s scan-history data error occurred code:%s, continue scan", pTask->id.idStr, tstrerror(code));
|
||||
continue;
|
||||
}
|
||||
|
||||
// the results generated before the fill-history task was paused should be dispatched to the sink node
|
||||
if (output == NULL) {
|
||||
if (qStreamRecoverScanFinished(exec)) {
|
||||
finished = true;
|
||||
} else {
|
||||
qSetStreamOpOpen(exec);
|
||||
if (streamTaskShouldPause(&pTask->status)) {
|
||||
SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
|
||||
if (qRes == NULL) {
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
qRes->type = STREAM_INPUT__DATA_BLOCK;
|
||||
qRes->blocks = pRes;
|
||||
code = streamTaskOutputResultBlock(pTask, qRes);
|
||||
if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) {
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
taosFreeQitem(qRes);
|
||||
return code;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
finished = qStreamRecoverScanFinished(exec);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -223,86 +209,36 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
|
|||
block.info.childId = pTask->info.selfChildId;
|
||||
taosArrayPush(pRes, &block);
|
||||
|
||||
batchCnt++;
|
||||
|
||||
qDebug("s-task:%s scan exec numOfBlocks:%d, limit:%d", pTask->id.idStr, batchCnt, batchSz);
|
||||
if (batchCnt >= batchSz) {
|
||||
if ((++numOfBlocks) >= batchSize) {
|
||||
qDebug("s-task:%s scan exec numOfBlocks:%d, output limit:%d reached", pTask->id.idStr, numOfBlocks, batchSize);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (taosArrayGetSize(pRes) == 0) {
|
||||
taosArrayDestroy(pRes);
|
||||
|
||||
if (finished) {
|
||||
qDebug("s-task:%s finish recover exec task ", pTask->id.idStr);
|
||||
break;
|
||||
} else {
|
||||
qDebug("s-task:%s continue recover exec task ", pTask->id.idStr);
|
||||
continue;
|
||||
if (taosArrayGetSize(pRes) > 0) {
|
||||
SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
|
||||
if (qRes == NULL) {
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
|
||||
if (qRes == NULL) {
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
qRes->type = STREAM_INPUT__DATA_BLOCK;
|
||||
qRes->blocks = pRes;
|
||||
|
||||
qRes->type = STREAM_INPUT__DATA_BLOCK;
|
||||
qRes->blocks = pRes;
|
||||
code = streamTaskOutputResultBlock(pTask, qRes);
|
||||
if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) {
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
taosFreeQitem(qRes);
|
||||
return code;
|
||||
}
|
||||
|
||||
if (finished) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if 0
|
||||
int32_t streamBatchExec(SStreamTask* pTask, int32_t batchLimit) {
|
||||
// fetch all queue item, merge according to batchLimit
|
||||
int32_t numOfItems = taosReadAllQitems(pTask->inputQueue1, pTask->inputQall);
|
||||
if (numOfItems == 0) {
|
||||
qDebug("task: %d, stream task exec over, queue empty", pTask->id.taskId);
|
||||
return 0;
|
||||
}
|
||||
SStreamQueueItem* pMerged = NULL;
|
||||
SStreamQueueItem* pItem = NULL;
|
||||
taosGetQitem(pTask->inputQall, (void**)&pItem);
|
||||
if (pItem == NULL) {
|
||||
if (pMerged != NULL) {
|
||||
// process merged item
|
||||
code = streamTaskOutputResultBlock(pTask, qRes);
|
||||
if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) {
|
||||
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
|
||||
taosFreeQitem(qRes);
|
||||
return code;
|
||||
}
|
||||
} else {
|
||||
return 0;
|
||||
taosArrayDestroy(pRes);
|
||||
}
|
||||
}
|
||||
|
||||
// if drop
|
||||
if (pItem->type == STREAM_INPUT__DESTROY) {
|
||||
// set status drop
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
|
||||
ASSERT(((SStreamQueueItem*)pItem)->type == STREAM_INPUT__DATA_BLOCK);
|
||||
streamTaskOutputResultBlock(pTask, (SStreamDataBlock*)pItem);
|
||||
}
|
||||
|
||||
// exec impl
|
||||
|
||||
// output
|
||||
// try dispatch
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t updateCheckPointInfo(SStreamTask* pTask) {
|
||||
int64_t ckId = 0;
|
||||
|
@ -350,12 +286,12 @@ static void waitForTaskIdle(SStreamTask* pTask, SStreamTask* pStreamTask) {
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
|
||||
static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
|
||||
SStreamMeta* pMeta = pTask->pMeta;
|
||||
|
||||
SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.taskId);
|
||||
if (pStreamTask == NULL) {
|
||||
// todo: destroy this task here
|
||||
// todo: destroy the fill-history task here
|
||||
qError("s-task:%s failed to find related stream task:0x%x, it may have been destroyed or closed", pTask->id.idStr,
|
||||
pTask->streamTaskId.taskId);
|
||||
return TSDB_CODE_STREAM_TASK_NOT_EXIST;
|
||||
|
@ -368,11 +304,12 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
|
|||
|
||||
STimeWindow* pTimeWindow = &pStreamTask->dataRange.window;
|
||||
|
||||
// todo. the dropping status should be appended to the status after the halt is completed.
|
||||
// It must be halted for a source stream task, since the related scan-history-data task starts scanning the history
|
||||
// for the step 2. For a agg task
|
||||
// for the step 2.
|
||||
int8_t status = pStreamTask->status.taskStatus;
|
||||
if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||
ASSERT(status == TASK_STATUS__HALT);
|
||||
ASSERT(status == TASK_STATUS__HALT || status == TASK_STATUS__DROPPING);
|
||||
} else {
|
||||
ASSERT(status == TASK_STATUS__SCAN_HISTORY);
|
||||
pStreamTask->status.taskStatus = TASK_STATUS__HALT;
|
||||
|
@ -396,23 +333,28 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
|
|||
qDebug("s-task:%s no need to update time window for non-source task", pStreamTask->id.idStr);
|
||||
}
|
||||
|
||||
// expand the query time window for stream scanner
|
||||
// 1. expand the query time window for stream task of WAL scanner
|
||||
pTimeWindow->skey = INT64_MIN;
|
||||
qResetStreamInfoTimeWindow(pStreamTask->exec.pExecutor);
|
||||
qStreamInfoResetTimewindowFilter(pStreamTask->exec.pExecutor);
|
||||
|
||||
// transfer the ownership of executor state
|
||||
// 2. transfer the ownership of executor state
|
||||
streamTaskReleaseState(pTask);
|
||||
streamTaskReloadState(pStreamTask);
|
||||
|
||||
// 3. clear the link between fill-history task and stream task info
|
||||
pStreamTask->historyTaskId.taskId = 0;
|
||||
|
||||
// 4. resume the state of the stream task; after this function, the stream task will run immediately. But it cannot be
|
||||
// paused, since the pause-allowed attribute is not set yet.
|
||||
streamTaskResumeFromHalt(pStreamTask);
|
||||
|
||||
qDebug("s-task:%s fill-history task set status to be dropping, save the state into disk", pTask->id.idStr);
|
||||
int32_t taskId = pTask->id.taskId;
|
||||
|
||||
// free it and remove it from disk meta-store
|
||||
// 5. free it and remove fill-history task from disk meta-store
|
||||
streamMetaUnregisterTask(pMeta, taskId);
|
||||
|
||||
// save to disk
|
||||
// 6. save to disk
|
||||
taosWLockLatch(&pMeta->lock);
|
||||
streamMetaSaveTask(pMeta, pStreamTask);
|
||||
if (streamMetaCommit(pMeta) < 0) {
|
||||
|
@ -420,7 +362,7 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
|
|||
}
|
||||
taosWUnLockLatch(&pMeta->lock);
|
||||
|
||||
// pause allowed
|
||||
// 7. pause allowed.
|
||||
streamTaskEnablePause(pStreamTask);
|
||||
|
||||
streamSchedExec(pStreamTask);
|
||||
|
@ -428,6 +370,26 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (!pTask->status.transferState) {
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t level = pTask->info.taskLevel;
|
||||
if (level == TASK_LEVEL__SOURCE) {
|
||||
streamTaskFillHistoryFinished(pTask);
|
||||
streamTaskEndScanWAL(pTask);
|
||||
} else if (level == TASK_LEVEL__AGG) { // do transfer task operator states.
|
||||
code = streamDoTransferStateToStreamTask(pTask);
|
||||
if (code != TSDB_CODE_SUCCESS) { // todo handle this
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t extractMsgFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks,
|
||||
const char* id) {
|
||||
int32_t retryTimes = 0;
|
||||
|
@ -581,17 +543,16 @@ int32_t streamTaskEndScanWAL(SStreamTask* pTask) {
|
|||
double el = (taosGetTimestampMs() - pTask->tsInfo.step2Start) / 1000.0;
|
||||
qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el);
|
||||
|
||||
// 3. notify downstream tasks to transfer executor state after handle all history blocks.
|
||||
pTask->status.transferState = true;
|
||||
|
||||
// 1. notify all downstream tasks to transfer executor state after handle all history blocks.
|
||||
int32_t code = streamDispatchTransferStateMsg(pTask);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
// todo handle error
|
||||
}
|
||||
|
||||
// the last execution of fill-history task, in order to transfer task operator states.
|
||||
code = streamTransferStateToStreamTask(pTask);
|
||||
if (code != TSDB_CODE_SUCCESS) { // todo handle this
|
||||
// 2. do transfer stream task operator states.
|
||||
pTask->status.transferState = true;
|
||||
code = streamDoTransferStateToStreamTask(pTask);
|
||||
if (code != TSDB_CODE_SUCCESS) { // todo handle error
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -615,9 +576,12 @@ int32_t streamTryExec(SStreamTask* pTask) {
|
|||
// todo the task should be commit here
|
||||
if (taosQueueEmpty(pTask->inputQueue->queue)) {
|
||||
// fill-history WAL scan has completed
|
||||
if (pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY_WAL && pTask->status.transferState == true) {
|
||||
streamTaskRecoverSetAllStepFinished(pTask);
|
||||
streamTaskEndScanWAL(pTask);
|
||||
if (pTask->status.transferState) {
|
||||
code = streamTransferStateToStreamTask(pTask);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
streamSchedExec(pTask);
|
||||
} else {
|
||||
atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
|
||||
qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus),
|
||||
|
|
|
@ -17,23 +17,30 @@
#include "ttimer.h"
#include "wal.h"

static void launchFillHistoryTask(SStreamTask* pTask);
static void streamTaskSetRangeStreamCalc(SStreamTask* pTask);
typedef struct SStreamTaskRetryInfo {
SStreamMeta* pMeta;
int32_t taskId;
} SStreamTaskRetryInfo;

static void streamTaskSetForReady(SStreamTask* pTask, int32_t numOfReqs) {
static int32_t streamSetParamForScanHistory(SStreamTask* pTask);
static void launchFillHistoryTask(SStreamTask* pTask);
static void streamTaskSetRangeStreamCalc(SStreamTask* pTask);
static int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated);

static void streamTaskSetReady(SStreamTask* pTask, int32_t numOfReqs) {
ASSERT(pTask->status.downstreamReady == 0);
pTask->status.downstreamReady = 1;
int64_t el = (taosGetTimestampMs() - pTask->tsInfo.init);

int64_t el = (taosGetTimestampMs() - pTask->tsInfo.init);
qDebug("s-task:%s all %d downstream ready, init completed, elapsed time:%dms, task status:%s",
pTask->id.idStr, numOfReqs, (int32_t) el, streamGetTaskStatusStr(pTask->status.taskStatus));
}

int32_t streamStartRecoverTask(SStreamTask* pTask, int8_t igUntreated) {
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated) {
SStreamScanHistoryReq req;
streamBuildSourceRecover1Req(pTask, &req, igUntreated);
int32_t len = sizeof(SStreamScanHistoryReq);
initScanHistoryReq(pTask, &req, igUntreated);

int32_t len = sizeof(SStreamScanHistoryReq);
void* serializedReq = rpcMallocCont(len);
if (serializedReq == NULL) {
return -1;
@ -65,9 +72,9 @@ static int32_t doLaunchScanHistoryTask(SStreamTask* pTask) {
if (pTask->info.fillHistory) {
streamSetParamForScanHistory(pTask);
}
streamSetParamForStreamScannerStep1(pTask, pRange, &pTask->dataRange.window);

int32_t code = streamStartRecoverTask(pTask, 0);
streamSetParamForStreamScannerStep1(pTask, pRange, &pTask->dataRange.window);
int32_t code = streamStartScanHistoryAsync(pTask, 0);
return code;
}

@ -85,6 +92,7 @@ int32_t streamTaskLaunchScanHistory(SStreamTask* pTask) {
if (pTask->info.fillHistory) {
streamSetParamForScanHistory(pTask);
}
streamTaskEnablePause(pTask);
streamTaskScanHistoryPrepare(pTask);
} else if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
qDebug("s-task:%s sink task do nothing to handle scan-history", pTask->id.idStr);
@ -141,7 +149,7 @@ int32_t streamTaskDoCheckDownstreamTasks(SStreamTask* pTask) {
} else {
qDebug("s-task:%s (vgId:%d) set downstream ready, since no downstream", pTask->id.idStr, pTask->info.nodeId);

streamTaskSetForReady(pTask, 0);
streamTaskSetReady(pTask, 0);
streamTaskSetRangeStreamCalc(pTask);
streamTaskLaunchScanHistory(pTask);

@ -187,7 +195,7 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask) {
}

static void doProcessDownstreamReadyRsp(SStreamTask* pTask, int32_t numOfReqs) {
streamTaskSetForReady(pTask, numOfReqs);
streamTaskSetReady(pTask, numOfReqs);
const char* id = pTask->id.idStr;

int8_t status = pTask->status.taskStatus;
@ -318,7 +326,7 @@ int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange *p
return qStreamSourceScanParamForHistoryScanStep2(pTask->exec.pExecutor, pVerRange, pWindow);
}

int32_t streamBuildSourceRecover1Req(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated) {
int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated) {
pReq->msgHead.vgId = pTask->info.nodeId;
pReq->streamId = pTask->id.streamId;
pReq->taskId = pTask->id.taskId;
@ -523,11 +531,6 @@ static void doCheckDownstreamStatus(SStreamTask* pTask, SStreamTask* pHTask) {
streamTaskDoCheckDownstreamTasks(pHTask);
}

typedef struct SStreamTaskRetryInfo {
SStreamMeta* pMeta;
int32_t taskId;
} SStreamTaskRetryInfo;

static void tryLaunchHistoryTask(void* param, void* tmrId) {
SStreamTaskRetryInfo* pInfo = param;
SStreamMeta* pMeta = pInfo->pMeta;
@ -637,7 +640,7 @@ int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask) {
}
}

// dispatch recover finish req to all related downstream task
// dispatch scan-history finish req to all related downstream task
code = streamDispatchScanHistoryFinishMsg(pTask);
if (code < 0) {
return -1;
@ -646,19 +649,9 @@ int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask) {
return 0;
}

bool streamTaskRecoverScanStep1Finished(SStreamTask* pTask) {
int32_t streamTaskFillHistoryFinished(SStreamTask* pTask) {
void* exec = pTask->exec.pExecutor;
return qStreamRecoverScanStep1Finished(exec);
}

bool streamTaskRecoverScanStep2Finished(SStreamTask* pTask) {
void* exec = pTask->exec.pExecutor;
return qStreamRecoverScanStep2Finished(exec);
}

int32_t streamTaskRecoverSetAllStepFinished(SStreamTask* pTask) {
void* exec = pTask->exec.pExecutor;
return qStreamRecoverSetAllStepFinished(exec);
return qStreamInfoResetTimewindowFilter(exec);
}

bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer) {
@ -668,7 +661,7 @@ bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer) {
int64_t nextStartVer = pRange->maxVer + 1;
if (nextStartVer > latestVer - 1) {
// no input data yet. no need to execute the secondary scan while the stream task is halted
streamTaskRecoverSetAllStepFinished(pTask);
streamTaskFillHistoryFinished(pTask);
qDebug(
"s-task:%s no need to perform secondary scan-history data(step 2), since no data ingest during step1 scan, "
"related stream task currentVer:%" PRId64,
@ -683,7 +676,6 @@ bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer) {
}
}

int32_t tEncodeStreamTaskCheckReq(SEncoder* pEncoder, const SStreamTaskCheckReq* pReq) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->reqId) < 0) return -1;
@ -839,7 +831,7 @@ void streamTaskPause(SStreamTask* pTask) {
return;
}

while(!pTask->status.pauseAllowed || (pTask->status.taskStatus == TASK_STATUS__HALT)) {
while (!pTask->status.pauseAllowed || (pTask->status.taskStatus == TASK_STATUS__HALT)) {
status = pTask->status.taskStatus;
if (status == TASK_STATUS__DROPPING) {
qDebug("vgId:%d s-task:%s task already dropped, do nothing", pMeta->vgId, pTask->id.idStr);
@ -856,8 +848,25 @@ void streamTaskPause(SStreamTask* pTask) {
taosMsleep(100);
}

// todo: use the task lock, instead of the meta lock
taosWLockLatch(&pMeta->lock);

status = pTask->status.taskStatus;
if (status == TASK_STATUS__DROPPING || status == TASK_STATUS__STOP) {
taosWUnLockLatch(&pMeta->lock);
qDebug("vgId:%d s-task:%s task already dropped/stopped/paused, do nothing", pMeta->vgId, pTask->id.idStr);
return;
}

atomic_store_8(&pTask->status.keepTaskStatus, pTask->status.taskStatus);
atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__PAUSE);
taosWUnLockLatch(&pMeta->lock);

// in case of fill-history task, stop the tsdb file scan operation.
if (pTask->info.fillHistory == 1) {
void* pExecutor = pTask->exec.pExecutor;
qKillTask(pExecutor, TSDB_CODE_SUCCESS);
}

int64_t el = taosGetTimestampMs() - st;
qDebug("vgId:%d s-task:%s set pause flag, prev:%s, elapsed time:%dms", pMeta->vgId, pTask->id.idStr,

@ -729,6 +729,7 @@ void streamStateFreeVal(void* val) {

int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen) {
#ifdef USE_ROCKSDB
qDebug("===stream===save skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64, key->win.skey,key->win.ekey, key->groupId);
return streamStateSessionPut_rocksdb(pState, key, value, vLen);
#else
SStateSessionKey sKey = {.key = *key, .opNum = pState->number};

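A hedged caller sketch for the session-state put shown above: the key layout (win.skey, win.ekey, groupId) is taken from the qDebug line in this hunk, while pState, the value buffer, and its size are illustrative assumptions only.

// hypothetical caller: store a small serialized state row for one session window
SSessionKey key = {.win = {.skey = 1700000000000, .ekey = 1700000001000}, .groupId = 1};
char        buf[16] = {0};                                   // assumed serialized state row
int32_t     code = streamStateSessionPut(pState, &key, buf, sizeof(buf));
if (code != 0) {
  // handle the write failure, e.g. retry or report the error
}
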
@ -33,7 +33,7 @@

static int64_t adjustExpEntries(int64_t entries) { return TMIN(DEFAULT_EXPECTED_ENTRIES, entries); }

static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) {
void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) {
if (pInfo->numSBFs < count) {
count = pInfo->numSBFs;
}
@ -49,7 +49,7 @@ static void clearItemHelper(void *p) {
tScalableBfDestroy(*pBf);
}

static void windowSBfDelete(SUpdateInfo *pInfo, uint64_t count) {
void windowSBfDelete(SUpdateInfo *pInfo, uint64_t count) {
if (count < pInfo->numSBFs) {
for (uint64_t i = 0; i < count; ++i) {
SScalableBf *pTsSBFs = taosArrayGetP(pInfo->pTsSBFs, 0);

@ -371,9 +371,9 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) {
pRead->pWal->vers.appliedVer);

// TODO: valid ver
// if (ver > pRead->pWal->vers.appliedVer) {
// return -1;
// }
if (ver > pRead->pWal->vers.commitVer) {
return -1;
}

if (pRead->curVersion != ver) {
code = walReaderSeekVer(pRead, ver);

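A hedged caller sketch for the bound check introduced above: requesting a version beyond the committed WAL version now fails immediately instead of relying on the commented-out appliedVer check, so a reader can simply back off and retry later. pReader and nextVer are illustrative names, not part of this change.

SWalCkHead head = {0};
if (walFetchHead(pReader, nextVer, &head) < 0) {
  // nextVer is ahead of commitVer, or the seek failed; try again once more data is committed
}
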
@ -17,6 +17,15 @@
#include <pthread.h>
#include "os.h"

#ifdef WINDOWS
#define THREAD_PTR_CHECK(p) \
do { \
if (!(p) || !(*(p))) return 0; \
} while (0);
#else
#define THREAD_PTR_CHECK(p)
#endif

int32_t taosThreadCreate(TdThread *tid, const TdThreadAttr *attr, void *(*start)(void *), void *arg) {
return pthread_create(tid, attr, start, arg);
}
@ -83,9 +92,13 @@ int32_t taosThreadCondSignal(TdThreadCond *cond) { return pthread_cond_signal(co

int32_t taosThreadCondBroadcast(TdThreadCond *cond) { return pthread_cond_broadcast(cond); }

int32_t taosThreadCondWait(TdThreadCond *cond, TdThreadMutex *mutex) { return pthread_cond_wait(cond, mutex); }
int32_t taosThreadCondWait(TdThreadCond *cond, TdThreadMutex *mutex) {
THREAD_PTR_CHECK(mutex)
return pthread_cond_wait(cond, mutex);
}

int32_t taosThreadCondTimedWait(TdThreadCond *cond, TdThreadMutex *mutex, const struct timespec *abstime) {
THREAD_PTR_CHECK(mutex)
return pthread_cond_timedwait(cond, mutex, abstime);
}

@ -124,24 +137,37 @@ int32_t taosThreadKeyDelete(TdThreadKey key) { return pthread_key_delete(key); }
int32_t taosThreadKill(TdThread thread, int32_t sig) { return pthread_kill(thread, sig); }

// int32_t taosThreadMutexConsistent(TdThreadMutex* mutex) {
// THREAD_PTR_CHECK(mutex)
// return pthread_mutex_consistent(mutex);
// }

int32_t taosThreadMutexDestroy(TdThreadMutex *mutex) { return pthread_mutex_destroy(mutex); }
int32_t taosThreadMutexDestroy(TdThreadMutex *mutex) {
THREAD_PTR_CHECK(mutex)
return pthread_mutex_destroy(mutex);
}

int32_t taosThreadMutexInit(TdThreadMutex *mutex, const TdThreadMutexAttr *attr) {
return pthread_mutex_init(mutex, attr);
}

int32_t taosThreadMutexLock(TdThreadMutex *mutex) { return pthread_mutex_lock(mutex); }
int32_t taosThreadMutexLock(TdThreadMutex *mutex) {
THREAD_PTR_CHECK(mutex)
return pthread_mutex_lock(mutex);
}

// int32_t taosThreadMutexTimedLock(TdThreadMutex * mutex, const struct timespec *abstime) {
// return pthread_mutex_timedlock(mutex, abstime);
// }

int32_t taosThreadMutexTryLock(TdThreadMutex *mutex) { return pthread_mutex_trylock(mutex); }
int32_t taosThreadMutexTryLock(TdThreadMutex *mutex) {
THREAD_PTR_CHECK(mutex)
return pthread_mutex_trylock(mutex);
}

int32_t taosThreadMutexUnlock(TdThreadMutex *mutex) { return pthread_mutex_unlock(mutex); }
int32_t taosThreadMutexUnlock(TdThreadMutex *mutex) {
THREAD_PTR_CHECK(mutex)
return pthread_mutex_unlock(mutex);
}

int32_t taosThreadMutexAttrDestroy(TdThreadMutexAttr *attr) { return pthread_mutexattr_destroy(attr); }

@ -224,6 +250,7 @@ int32_t taosThreadSetSchedParam(TdThread thread, int32_t policy, const struct sc
int32_t taosThreadSetSpecific(TdThreadKey key, const void *value) { return pthread_setspecific(key, value); }

int32_t taosThreadSpinDestroy(TdThreadSpinlock *lock) {
THREAD_PTR_CHECK(lock)
#ifdef TD_USE_SPINLOCK_AS_MUTEX
return pthread_mutex_destroy((pthread_mutex_t *)lock);
#else
@ -242,6 +269,7 @@ int32_t taosThreadSpinInit(TdThreadSpinlock *lock, int32_t pshared) {
}

int32_t taosThreadSpinLock(TdThreadSpinlock *lock) {
THREAD_PTR_CHECK(lock)
#ifdef TD_USE_SPINLOCK_AS_MUTEX
return pthread_mutex_lock((pthread_mutex_t *)lock);
#else
@ -250,6 +278,7 @@ int32_t taosThreadSpinLock(TdThreadSpinlock *lock) {
}

int32_t taosThreadSpinTrylock(TdThreadSpinlock *lock) {
THREAD_PTR_CHECK(lock)
#ifdef TD_USE_SPINLOCK_AS_MUTEX
return pthread_mutex_trylock((pthread_mutex_t *)lock);
#else
@ -258,6 +287,7 @@ int32_t taosThreadSpinTrylock(TdThreadSpinlock *lock) {
}

int32_t taosThreadSpinUnlock(TdThreadSpinlock *lock) {
THREAD_PTR_CHECK(lock)
#ifdef TD_USE_SPINLOCK_AS_MUTEX
return pthread_mutex_unlock((pthread_mutex_t *)lock);
#else

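A minimal illustration of what the Windows-only THREAD_PTR_CHECK guard above changes, assuming only the wrappers shown in this hunk: with the guard in place, passing a NULL (or zero-initialized) mutex makes the wrapper return 0 early instead of dereferencing the pointer, while non-Windows builds keep plain pthread behavior. The helper below is hypothetical and only for illustration.

static int32_t lockIfPresent(TdThreadMutex *pMutex) {
  // on Windows a NULL pMutex returns 0 here because of THREAD_PTR_CHECK
  int32_t code = taosThreadMutexLock(pMutex);
  if (pMutex != NULL && code == 0) {
    // ... critical section ...
    code = taosThreadMutexUnlock(pMutex);
  }
  return code;
}
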
@ -5,6 +5,13 @@ if (DEFINED GRANT_CFG_INCLUDE_DIR)
add_definitions(-DGRANTS_CFG)
endif()

IF (${ASSERT_NOT_CORE})
ADD_DEFINITIONS(-DASSERT_NOT_CORE)
MESSAGE(STATUS "disable assert core")
ELSE ()
MESSAGE(STATUS "enable assert core")
ENDIF (${ASSERT_NOT_CORE})

target_include_directories(
util
PUBLIC "${TD_SOURCE_DIR}/include/util"

@ -644,6 +644,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR, "Consumer error, to see log")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_TOPIC_OUT_OF_RANGE, "Topic num out of range")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE, "Group num out of range 100")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_SAME_COMMITTED_VALUE, "Same committed value")

// stream
TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_TASK_NOT_EXIST, "Stream task not exist")

@ -76,7 +76,11 @@ static int32_t tsDaylightActive; /* Currently in daylight saving time. */

bool tsLogEmbedded = 0;
bool tsAsyncLog = true;
#ifdef ASSERT_NOT_CORE
bool tsAssert = false;
#else
bool tsAssert = true;
#endif
int32_t tsNumOfLogLines = 10000000;
int32_t tsLogKeepDays = 0;
LogFp tsLogFp = NULL;

@ -105,7 +105,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
#,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
@ -160,7 +160,7 @@
,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py
,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py
,,n,system-test,python3 ./test.py -N 3 -f 0-others/walRetention.py
,,n,system-test,python3 ./test.py -f 0-others/splitVGroup.py -N 5
#,,n,system-test,python3 ./test.py -f 0-others/splitVGroup.py -N 5
,,n,system-test,python3 ./test.py -f 0-others/timeRangeWise.py -N 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_replica.py -N 3
@ -345,6 +345,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/smaTest.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/smaTest.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/sma_index.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml_TS-3724.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/spread.py

@ -3,4 +3,6 @@
rem echo taskkill /F /IM taosd.exe

wmic process where "name='taosd.exe'" call terminate > NUL 2>&1
taskkill /F /IM taosd.exe > NUL 2>&1
taskkill /F /IM taosd.exe > NUL 2>&1

rem echo taskkill /F /IM taosd.exe finished

@ -24,6 +24,8 @@ import threading
import json

class TDTestCase:
    updatecfgDict = {'clientCfg': {'smlDot2Underline': 0}}

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)

@ -28,6 +28,8 @@ if platform.system().lower() == 'windows':
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')

class TDTestCase:
    updatecfgDict = {'clientCfg': {'smlDot2Underline': 0}}

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)

@ -15,7 +15,7 @@ sys.path.append("./7-tmq")
from tmqCommon import *

class TDTestCase:
    updatecfgDict = {'clientCfg': {'smlChildTableName': 'dataModelName', 'fqdn': 'localhost'}, 'fqdn': 'localhost'}
    updatecfgDict = {'clientCfg': {'smlChildTableName': 'dataModelName', 'fqdn': 'localhost', 'smlDot2Underline': 0}, 'fqdn': 'localhost'}
    print("===================: ", updatecfgDict)

    def init(self, conn, logSql, replicaVar=1):
@ -101,6 +101,15 @@ class TDTestCase:

        tdSql.query(f"desc {dbname}.macylr")
        tdSql.checkRows(25)

        tdSql.query(f"select * from ts3724.`.stb2`")
        tdSql.checkRows(1)

        tdSql.query(f"select * from ts3724.`stb.2`")
        tdSql.checkRows(1)

        tdSql.query(f"select * from ts3724.`stb2.`")
        tdSql.checkRows(1)
        return

    def run(self):

@ -0,0 +1,125 @@
import taos
import sys
import time
import socket
import os
import threading

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *

class TDTestCase:
    updatecfgDict = {'clientCfg': {'smlChildTableName': 'dataModelName', 'fqdn': 'localhost', 'smlTsDefaultName': "times"}, 'fqdn': 'localhost'}
    print("===================: ", updatecfgDict)

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), True)
        #tdSql.init(conn.cursor(), logSql) # output sql.txt file

    def checkContent(self, dbname="sml_db"):
        simClientCfg="%s/taos.cfg"%tdDnodes.getSimCfgPath()
        buildPath = tdCom.getBuildPath()
        cmdStr = '%s/build/bin/sml_test %s'%(buildPath, simClientCfg)
        print("cmdStr:", cmdStr)
        tdLog.info(cmdStr)
        ret = os.system(cmdStr)
        if ret != 0:
            tdLog.info("sml_test ret != 0")

        tdSql.query(f"select * from ts3303.stb2")
        tdSql.query(f"select * from ts3303.meters")

        # tdSql.execute('use sml_db')
        tdSql.query(f"select * from {dbname}.t_b7d815c9222ca64cdf2614c61de8f211")
        tdSql.checkRows(1)

        tdSql.checkData(0, 0, '2016-01-01 08:00:07.000')
        tdSql.checkData(0, 1, 2000)
        tdSql.checkData(0, 2, 200)
        tdSql.checkData(0, 3, 15)
        tdSql.checkData(0, 4, 24.5208)
        tdSql.checkData(0, 5, 28.09377)
        tdSql.checkData(0, 6, 428)
        tdSql.checkData(0, 7, 0)
        tdSql.checkData(0, 8, 304)
        tdSql.checkData(0, 9, 0)
        tdSql.checkData(0, 10, 25)

        tdSql.query(f"select * from {dbname}.readings")
        tdSql.checkRows(9)

        tdSql.query(f"select distinct tbname from {dbname}.readings")
        tdSql.checkRows(4)

        tdSql.query(f"select * from {dbname}.t_0799064f5487946e5d22164a822acfc8 order by times")
        tdSql.checkRows(2)
        tdSql.checkData(0, 3, "kk")
        tdSql.checkData(1, 3, "")

        tdSql.query(f"select distinct tbname from {dbname}.`sys_if_bytes_out`")
        tdSql.checkRows(2)

        tdSql.query(f"select * from {dbname}.t_fc70dec6677d4277c5d9799c4da806da order by times")
        tdSql.checkRows(2)
        tdSql.checkData(0, 1, 1.300000000)
        tdSql.checkData(1, 1, 13.000000000)

        tdSql.query(f"select * from {dbname}.`sys_procs_running`")
        tdSql.checkRows(1)
        tdSql.checkData(0, 1, 42.000000000)
        tdSql.checkData(0, 2, "web01")

        tdSql.query(f"select distinct tbname from {dbname}.`sys_cpu_nice`")
        tdSql.checkRows(3)

        tdSql.query(f"select * from {dbname}.`sys_cpu_nice` order by times")
        tdSql.checkRows(4)
        tdSql.checkData(0, 1, 13.000000000)
        tdSql.checkData(0, 2, "web01")
        tdSql.checkData(0, 3, None)
        tdSql.checkData(0, 4, "lga")

        tdSql.checkData(1, 1, 9.000000000)
        tdSql.checkData(1, 2, "web02")
        tdSql.checkData(3, 3, "t1")
        tdSql.checkData(0, 4, "lga")

        tdSql.query(f"select * from {dbname}.macylr")
        tdSql.checkRows(2)

        tdSql.query(f"select * from {dbname}.qelhxo")
        tdSql.checkRows(5)

        tdSql.query(f"desc {dbname}.macylr")
        tdSql.checkRows(25)

        tdSql.query(f"select * from ts3724._stb2")
        tdSql.checkRows(1)

        tdSql.query(f"select * from ts3724.stb_2")
        tdSql.checkRows(1)

        tdSql.query(f"select * from ts3724.stb2_")
        tdSql.checkRows(1)
        return

    def run(self):
        tdSql.prepare()
        self.checkContent()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@ -222,9 +222,9 @@ class TDTestCase:

        actConsumeTotalRows = resultList[0]

        if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
        if not (actConsumeTotalRows >= 0 and actConsumeTotalRows <= totalRowsInserted):
            tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
            tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
            tdLog.info("and second consume rows should be between [0 and %d]"%(totalRowsInserted))
            tdLog.exit("%d tmq consume rows error!"%consumerId)

        time.sleep(10)

@ -68,7 +68,7 @@ int32_t shellCheckIntSize() {
return 0;
}

void shellPrintVersion() { printf("version: %s\r\n", version); }
void shellPrintVersion() { printf("%s\r\n", shell.info.programVersion); }

void shellGenerateAuth() {
char secretEncrypt[TSDB_PASSWORD_LEN + 1] = {0};

@ -1522,6 +1522,36 @@ int sml_ts2385_Test() {
return code;
}

int sml_ts3724_Test() {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);

TAOS_RES *pRes = taos_query(taos, "drop database if exists ts3724");
taos_free_result(pRes);

pRes = taos_query(taos, "create database if not exists ts3724");
taos_free_result(pRes);

const char *sql[] = {
"stb.2,t1=1 f1=283i32 1632299372000",
".stb2,t1=1 f1=106i32 1632299378000",
"stb2.,t1=1 f1=106i32 1632299378000",
};

pRes = taos_query(taos, "use ts3724");
taos_free_result(pRes);

pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL,
TSDB_SML_TIMESTAMP_MILLI_SECONDS);

int code = taos_errno(pRes);
printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes));
taos_free_result(pRes);

taos_close(taos);

return code;
}

int main(int argc, char *argv[]) {
if (argc == 2) {
taos_options(TSDB_OPTION_CONFIGDIR, argv[1]);
@ -1579,5 +1609,8 @@ int main(int argc, char *argv[]) {
ASSERT(!ret);
ret = sml_19221_Test();
ASSERT(!ret);
ret = sml_ts3724_Test();
ASSERT(!ret);

return ret;
}

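A hedged note on how the three line-protocol measurements in sml_ts3724_Test map to super-table names, inferred only from the two system tests touched by this commit: with the default client setting the dots appear to be rewritten to underscores, while smlDot2Underline set to 0 keeps them literally, in which case the names must be escaped with backquotes in SQL. Both the option default and the exact mapping are assumptions drawn from the test expectations, not from the schemaless code itself.

// assumed mapping checked by the tests above
//   default (dots rewritten):   "stb.2" -> stb_2, ".stb2" -> _stb2, "stb2." -> stb2_
//   smlDot2Underline = 0:       names keep the dot and need escaping in SQL
TAOS_RES *res = taos_query(taos, "select * from ts3724.`stb.2`");  // only valid when the dot is preserved
taos_free_result(res);
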
@ -2,7 +2,7 @@ aux_source_directory(src TSIM_SRC)
add_executable(tsim ${TSIM_SRC})
target_link_libraries(
tsim
PUBLIC taos
PUBLIC taos_static
PUBLIC util
PUBLIC common
PUBLIC os