Merge branch '3.0' of https://github.com/taosdata/TDengine into multiRead

This commit is contained in:
yihaoDeng 2022-07-28 11:07:27 +08:00
commit db6911001a
190 changed files with 7668 additions and 6571 deletions

2
.gitignore vendored
View File

@ -48,6 +48,8 @@ pysim/
*.out
*DS_Store
tests/script/api/batchprepare
taosadapter
taosadapter-debug
# Doxygen Generated files
html/

9
.gitmodules vendored
View File

@ -13,12 +13,3 @@
[submodule "examples/rust"]
path = examples/rust
url = https://github.com/songtianyi/tdengine-rust-bindings.git
[submodule "tools/taos-tools"]
path = tools/taos-tools
url = https://github.com/taosdata/taos-tools
[submodule "tools/taosadapter"]
path = tools/taosadapter
url = https://github.com/taosdata/taosadapter.git
[submodule "tools/taosws-rs"]
path = tools/taosws-rs
url = https://github.com/taosdata/taosws-rs

View File

@ -113,6 +113,12 @@ def pre_test(){
echo "unmatched reposiotry ${CHANGE_URL}"
'''
}
sh '''
cd ${WKC}
git rm --cached tools/taos-tools 2>/dev/null || :
git rm --cached tools/taosadapter 2>/dev/null || :
git rm --cached tools/taosws-rs 2>/dev/null || :
'''
sh '''
cd ${WKC}
git submodule update --init --recursive
@ -258,6 +264,13 @@ def pre_test_win(){
git branch
git log -5
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git rm --cached tools/taos-tools 2>nul
git rm --cached tools/taosadapter 2>nul
git rm --cached tools/taosws-rs 2>nul
exit 0
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git submodule update --init --recursive

View File

@ -0,0 +1,13 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG df8678f
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

View File

@ -0,0 +1,13 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 817cb6a
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

View File

@ -0,0 +1,13 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git
GIT_TAG 9de599d
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

View File

@ -9,6 +9,24 @@ endfunction(cat IN_FILE OUT_FILE)
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# taos-tools
if(${BUILD_TOOLS})
cat("${TD_SUPPORT_DIR}/taostools_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
# taosws-rs
if(${WEBSOCKET})
cat("${TD_SUPPORT_DIR}/taosws_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
# taosadapter
if(${BUILD_HTTP})
MESSAGE("BUILD_HTTP is on")
else ()
MESSAGE("BUILD_HTTP is off, use taosAdapter")
cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
# pthread
if(${BUILD_PTHREAD})
cat("${TD_SUPPORT_DIR}/pthread_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})

View File

@ -18,7 +18,7 @@ database_option: {
| CACHESIZE value
| COMP {0 | 1 | 2}
| DURATION value
| FSYNC value
| WAL_FSYNC_PERIOD value
| MAXROWS value
| MINROWS value
| KEEP value
@ -28,7 +28,7 @@ database_option: {
| REPLICA value
| RETENTIONS ingestion_duration:keep_duration ...
| STRICT {'off' | 'on'}
| WAL {1 | 2}
| WAL_LEVEL {1 | 2}
| VGROUPS value
| SINGLE_STABLE {0 | 1}
| WAL_RETENTION_PERIOD value
@ -39,41 +39,42 @@ database_option: {
```
### Parameter Description
- BUFFER: size of the write memory pool of a VNODE, in MB. Default 96, minimum 3, maximum 16384.
- CACHEMODEL: whether the most recent data of subtables is cached in memory. Default: none.
  - none: no caching.
  - last_row: cache the most recent row of each subtable. This significantly improves the performance of the LAST_ROW function.
  - last_value: cache the most recent non-NULL value of every column of each subtable. This significantly improves the performance of the LAST function when no special clause (WHERE, ORDER BY, GROUP BY, INTERVAL) is involved.
  - both: cache both the most recent row and the most recent column values.
- CACHESIZE: amount of memory per vnode used to cache the most recent subtable data. Default 1, range [1, 65536], unit MB.
- COMP: database file compression flag. Default 2, range [0, 2].
  - 0: no compression.
  - 1: one-stage compression.
  - 2: two-stage compression.
- DURATION: time span of the data stored in one data file. A unit suffix can be used, e.g. DURATION 100h or DURATION 10d; supported units are m (minutes), h (hours) and d (days). Without a unit the default is days, so DURATION 50 means 50 days.
- WAL_FSYNC_PERIOD: when the WAL parameter is set to 2, the period at which data is flushed to disk. Default 3000, unit milliseconds. Minimum 0 (flush immediately on every write), maximum 180000 (three minutes).
- MAXROWS: maximum number of records in a file block. Default 4096.
- MINROWS: minimum number of records in a file block. Default 100.
- KEEP: number of days data files are kept. Default 3650, range [1, 365000]; must be greater than or equal to DURATION. Data older than KEEP is deleted automatically. KEEP accepts a unit suffix, e.g. KEEP 100h or KEEP 10d, with units m (minutes), h (hours) and d (days); without a unit the default is days, e.g. KEEP 50.
- PAGES: number of cache pages of the metadata storage engine in a VNODE. Default 256, minimum 64. Metadata storage of one VNODE occupies PAGESIZE \* PAGES, i.e. 1 MB of memory by default.
- PAGESIZE: page size of the metadata storage engine in a VNODE, in KB. Default 4 KB, range 1 to 16384 (1 KB to 16 MB).
- PRECISION: timestamp precision of the database. ms for milliseconds, us for microseconds, ns for nanoseconds. Default: ms.
- REPLICA: number of database replicas, 1 or 3, default 1. In a cluster the replica count must not exceed the number of DNODEs.
- RETENTIONS: aggregation periods and retention times of the data. For example, RETENTIONS 15s:7d,1m:21d,15m:50d means the raw collection interval is 15 seconds and raw data is kept for 7 days, 1-minute aggregates are kept for 21 days, and 15-minute aggregates are kept for 50 days. Currently exactly three retention levels are supported.
- STRICT: consistency requirement of data replication. Default: off.
  - on: strong consistency, i.e. the standard raft protocol; a write succeeds once a majority has committed.
  - off: weak consistency; a write succeeds once the local commit finishes.
- WAL_LEVEL: WAL level. Default 1.
  - 1: write WAL but do not execute fsync.
  - 2: write WAL and execute fsync.
- VGROUPS: initial number of vgroups in the database.
- SINGLE_STABLE: whether only a single supertable can be created in this database, intended for supertables with a very large number of columns.
  - 0: multiple supertables may be created.
  - 1: only one supertable may be created.
- WAL_RETENTION_PERIOD: extra retention policy of WAL files, used for data subscription. Retention time of the WAL, in seconds. Default 0, i.e. delete immediately after the data is flushed to disk; -1 means never delete.
- WAL_RETENTION_SIZE: extra retention policy of WAL files, used for data subscription. Maximum retained WAL size, in KB. Default 0, i.e. delete immediately after the data is flushed to disk; -1 means never delete.
- WAL_ROLL_PERIOD: WAL file rotation period, in seconds. After a WAL file has been created and written for this long, a new WAL file is created automatically. Default 0, i.e. a new file is created only when data is flushed to disk.
- WAL_SEGMENT_SIZE: size of a single WAL file, in KB. When the current WAL file exceeds this limit, a new WAL file is created automatically. Default 0, i.e. a new file is created only when data is flushed to disk.
### Create Database Examples
@ -104,10 +105,10 @@ DROP DATABASE [IF EXISTS] db_name
```sql
ALTER DATABASE db_name [alter_database_options]
alter_database_options:
alter_database_option ...
alter_database_option: {
CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
| CACHESIZE value
@ -118,7 +119,7 @@ alter_database_option: {
```
:::note
Other parameters cannot be modified in version 3.0.0.0.
:::
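For example, several of the options above can be changed on an existing database in a single statement (a sketch; the database name `power` and the values are only illustrative):

```sql
ALTER DATABASE power CACHEMODEL 'both' CACHESIZE 16;
```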
@ -139,3 +140,17 @@ SHOW CREATE DATABASE db_name;
Commonly used for database migration: for an existing database it returns the CREATE statement, and executing that statement in another cluster produces a database with exactly the same settings.
### View Database Parameters
```sql
SHOW DATABASES \G;
```
Lists the configuration parameters of all databases in the system, one parameter per row.
## Delete Expired Data
```sql
TRIM DATABASE db_name;
```
Deletes expired data and reorganizes the remaining data according to the multi-level storage configuration.

View File

@ -139,6 +139,7 @@ ALTER TABLE tb_name RENAME COLUMN old_col_name new_col_name
## Modify a Subtable
```sql
ALTER TABLE [db_name.]tb_name alter_table_clause
alter_table_clause: {
@ -153,6 +154,7 @@ alter_table_option: {
TTL value
| COMMENT 'string_value'
}
```
**Usage Notes**
1. Columns and tags of a subtable can only be modified through its supertable, except for changing a tag value.
@ -192,5 +194,5 @@ SHOW CREATE TABLE tb_name;
### Get Table Schema Information
```
DESCRIBE tb_name;
DESCRIBE [db_name.]tb_name;
```
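With the optional database prefix now accepted, a table's schema can be inspected without switching databases first (a sketch; `power` and `d1001` are illustrative names):

```sql
DESCRIBE power.d1001;
```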

View File

@ -46,7 +46,7 @@ SHOW CREATE STABLE stb_name;
### Get the Schema of a Supertable
```
DESCRIBE stb_name;
DESCRIBE [db_name.]stb_name;
```
## Drop a Supertable

View File

@ -218,7 +218,7 @@ Expressions in the GROUP BY clause can include any column of a table or view; these
The PARTITION BY clause is a TDengine-specific extension that slices the data by part_list and performs the computation within each slice.
See [TDengine distinguished queries](taos-sql/distinguished) for details.
See [TDengine distinguished queries](./distinguished) for details.
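As a sketch (the table and column names `meters`, `current` and `location` are illustrative), PARTITION BY slices the data by the partition expression and evaluates the query within each slice:

```sql
-- average current computed separately for every distinct location value
SELECT location, AVG(current) FROM meters PARTITION BY location;
```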
## ORDER BY

View File

@ -13,7 +13,7 @@ toc_max_heading_level: 4
#### ABS
```sql
SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the absolute value of the specified column.
@ -31,7 +31,7 @@ toc_max_heading_level: 4
#### ACOS
```sql
SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the arc cosine of the specified column.
@ -49,7 +49,7 @@ toc_max_heading_level: 4
#### ASIN
```sql
SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the arc sine of the specified column.
@ -68,7 +68,7 @@ toc_max_heading_level: 4
#### ATAN
```sql
SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the arc tangent of the specified column.
@ -86,7 +86,7 @@ toc_max_heading_level: 4
#### CEIL
```
```sql
SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
@ -108,7 +108,7 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### COS
```sql
SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the cosine of the specified column.
@ -125,7 +125,7 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### FLOOR
```
```sql
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
@ -135,7 +135,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### LOG
```sql
SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the logarithm of the specified column to the base `base`.
@ -154,7 +154,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### POW
```sql
SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the specified column raised to the power `power`.
@ -172,7 +172,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### ROUND
```
```sql
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
@ -183,7 +183,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### SIN
```sql
SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the sine of the specified column.
@ -201,7 +201,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### SQRT
```sql
SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the square root of the specified column.
@ -219,7 +219,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### TAN
```sql
SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the tangent of the specified column.
@ -240,8 +240,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### CHAR_LENGTH
```
SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```sql
SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: String length counted in characters.
@ -257,7 +257,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### CONCAT
```sql
SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: String concatenation function.
@ -273,8 +273,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### CONCAT_WS
```
SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
```sql
SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: String concatenation function with a separator.
@ -290,8 +290,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### LENGTH
```
SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```sql
SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: String length counted in bytes.
@ -307,8 +307,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### LOWER
```
SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```sql
SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Converts the string argument to all lower-case letters.
@ -324,8 +324,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### LTRIM
```
SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```sql
SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the string with leading (left-hand) spaces removed.
@ -341,8 +341,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### RTRIM
```
SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```sql
SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns the string with trailing (right-hand) spaces removed.
@ -358,8 +358,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### SUBSTR
```
SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
```sql
SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns a substring of length len of the source string str, starting at position pos.
@ -375,8 +375,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### UPPER
```
SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```sql
SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Converts the string argument to all upper-case letters.
@ -397,7 +397,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### CAST
```sql
SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Type-conversion function. The input expression may be a normal column, a constant, a scalar function, or any arithmetic combination of them; it can only be used in the SELECT clause.
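For instance (the column names are illustrative), CAST can appear in a select list to convert types:

```sql
-- convert an INT column to BIGINT and a FLOAT column to a string
SELECT CAST(voltage AS BIGINT), CAST(phase AS BINARY(20)) FROM meters;
```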
@ -587,7 +587,7 @@ TDengine supports aggregate queries on data. The following aggregate functions are provided.
### AVG
```
```sql
SELECT AVG(field_name) FROM tb_name [WHERE clause];
```
@ -602,7 +602,7 @@ SELECT AVG(field_name) FROM tb_name [WHERE clause];
### COUNT
```
```sql
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
```
@ -623,7 +623,7 @@ SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
### ELAPSED
```mysql
```sql
SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
```
@ -649,7 +649,7 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
### LEASTSQUARES
```
```sql
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
```
@ -664,7 +664,7 @@ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
### MODE
```
```sql
SELECT MODE(field_name) FROM tb_name [WHERE clause];
```
@ -679,7 +679,7 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];
### SPREAD
```
```sql
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
@ -694,7 +694,7 @@ SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
### STDDEV
```
```sql
SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
```
@ -709,7 +709,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
### SUM
```
```sql
SELECT SUM(field_name) FROM tb_name [WHERE clause];
```
@ -724,7 +724,7 @@ SELECT SUM(field_name) FROM tb_name [WHERE clause];
### HYPERLOGLOG
```
```sql
SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
@ -741,7 +741,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
### HISTOGRAM
```
```sql
SELECT HISTOGRAM(field_name, bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
```
@ -775,7 +775,7 @@ SELECT HISTOGRAM(field_namebin_type, bin_description, normalized) FROM tb_nam
### APERCENTILE
```
```sql
SELECT APERCENTILE(field_name, P[, algo_type])
FROM { tb_name | stb_name } [WHERE clause]
```
@ -790,7 +790,7 @@ FROM { tb_name | stb_name } [WHERE clause]
### BOTTOM
```
```sql
SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
@ -810,7 +810,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
### FIRST
```
```sql
SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
@ -830,7 +830,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
### INTERP
```
```sql
SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
```
@ -854,7 +854,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [
### LAST
```
```sql
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
@ -875,7 +875,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
### LAST_ROW
```
```sql
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
```
@ -894,7 +894,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
### MAX
```
```sql
SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
```
@ -909,7 +909,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
### MIN
```
```sql
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
```
@ -924,7 +924,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
### PERCENTILE
```
```sql
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```
@ -941,7 +941,7 @@ SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
### TAIL
```
```sql
SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
```
@ -958,7 +958,7 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
### TOP
```
```sql
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```
@ -978,7 +978,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
### UNIQUE
```
```sql
SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
```
@ -998,7 +998,7 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
### CSUM
```sql
SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Cumulative sum; the number of output rows equals the number of input rows.
@ -1020,7 +1020,7 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
### DERIVATIVE
```
```sql
SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
```
@ -1037,9 +1037,9 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER
### DIFF
```sql
SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause];
```
```sql
SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause];
```
**Description**: Returns the difference between the value of a column and the corresponding value in the previous row. ignore_negative can be 0 or 1 and may be omitted; the default is 0, meaning negative differences are not ignored. When ignore_negative is 1, negative values are ignored.
@ -1054,7 +1054,7 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER
### IRATE
```
```sql
SELECT IRATE(field_name) FROM tb_name WHERE clause;
```
@ -1069,7 +1069,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
### MAVG
```sql
SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Moving average of k consecutive values. If the number of input rows is less than k, no result is produced. The valid range of k is 1 ≤ k ≤ 1000.
@ -1091,7 +1091,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
### SAMPLE
```sql
SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```
**Description**: Returns k sampled values of the data. The valid range of k is 1 ≤ k ≤ 1000.
@ -1111,7 +1111,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
### STATECOUNT
```
```sql
SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause];
```
@ -1166,7 +1166,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
### TWA
```
```sql
SELECT TWA(field_name) FROM tb_name WHERE clause;
```
@ -1185,7 +1185,7 @@ SELECT TWA(field_name) FROM tb_name WHERE clause;
### DATABASE
```
```sql
SELECT DATABASE();
```
@ -1194,7 +1194,7 @@ SELECT DATABASE();
### CLIENT_VERSION
```
```sql
SELECT CLIENT_VERSION();
```
@ -1202,7 +1202,7 @@ SELECT CLIENT_VERSION();
### SERVER_VERSION
```
```sql
SELECT SERVER_VERSION();
```
@ -1210,7 +1210,7 @@ SELECT SERVER_VERSION();
### SERVER_STATUS
```
```sql
SELECT SERVER_VERSION();
```

View File

@ -0,0 +1,66 @@
---
sidebar_label: Message Queue
title: Message Queue
---
Starting with TDengine 3.0.0.0, the message queue has been substantially optimized and enhanced to simplify user solutions.
## Create a Topic
```sql
CREATE TOPIC [IF NOT EXISTS] topic_name AS {subquery | DATABASE db_name | STABLE stb_name };
```
There are three kinds of topics: column subscription, supertable subscription and database subscription.
**Column subscription** is described by a subquery; it supports filtering, scalar functions and UDF scalar functions, but not JOIN, GROUP BY, window clauses, aggregate functions or UDF aggregate functions (see the sketch after the rules below). The rules for column subscriptions are:
1. Once a TOPIC is created, the fields of the returned result are fixed.
2. Columns that are subscribed or used in the computation cannot be dropped or modified.
3. New columns can be added, but they do not appear in the subscription result fields.
4. For select \*, the subscription expands to all columns that existed at creation time (data columns for subtables and normal tables; data columns plus tag columns for supertables).
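A minimal sketch of a column subscription, as referenced above (the topic, database, table and column names are illustrative); filters and scalar functions are allowed, aggregate functions are not:

```sql
CREATE TOPIC IF NOT EXISTS topic_meters AS
  SELECT ts, current, voltage, ABS(phase) FROM power.meters WHERE voltage > 200;
```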
**Supertable subscription and database subscription** follow these rules:
1. Schema changes of the subscribed object are not restricted.
2. The schema in the returned messages is per block; the schema of each block may differ.
3. Data written after a column change that has not yet been flushed to disk is returned with the schema in effect at write time.
4. Data written after a column change that has already been flushed to disk is returned with the schema in effect at flush time.
## Drop a Topic
```sql
DROP TOPIC [IF EXISTS] topic_name;
```
If there are consumers on the topic at this point, those consumers receive an error.
## View Topics
## SHOW TOPICS
```sql
SHOW TOPICS;
```
Shows information about all topics in the current database.
## Create a Consumer Group
Consumer groups can only be created through the APIs provided by the TDengine client driver or the connectors.
## Drop a Consumer Group
```sql
DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
```
Drops the consumer group cgroup_name on the topic topic_name.
## View Consumer Groups
```sql
SHOW CONSUMERS;
```
Shows information about all active consumers in the current database.

View File

@ -9,8 +9,8 @@ title: Naming and Boundary Limits
2. Names may start with an English letter or an underscore, but not with a digit.
3. Names are case-insensitive.
4. Rules for escaped table (column) names:
To support a wider range of table names, TDengine introduces the escape character "`". It allows a table name to avoid conflicts with keywords and to bypass the validity checks above.
Escaped table (column) names are still subject to the length limits, and the escape characters are not counted in the length. With the escape characters, the content inside them is no longer converted to a uniform case.
For example: \`aBc\` and \`abc\` are different table (column) names, while abc and aBc are the same table (column) name.
Note that the content inside the escape characters must consist of printable characters.
@ -23,28 +23,30 @@ title: Naming and Boundary Limits
## General Limits
- Maximum database name length: 32.
- Maximum table name length: 192, excluding the database name prefix and the separator.
- Maximum length of one data row: 48 KB (note: each BINARY/NCHAR column additionally occupies 2 bytes of storage).
- Maximum column name length: 64.
- At most 4096 columns are allowed; at least 2 are required, and the first column must be the timestamp.
- Maximum tag name length: 64.
- At most 128 tags are allowed, with at least 1; the total length of the tag values in a table must not exceed 16 KB.
- Maximum SQL statement length: 1,048,576 characters; it can be changed with the client configuration parameter maxSQLLength, range 65480 ~ 1048576.
- A SELECT statement may return at most 4096 columns (function calls in the statement may also occupy column slots); when the limit is exceeded, explicitly specify fewer result columns to avoid an execution error.
- The numbers of databases, supertables and tables are not limited by the system, only by system resources.
- The number of database replicas can only be 1 or 3.
- Maximum user name length: 23 bytes.
- Maximum user password length: 15 bytes.
- The total number of data rows depends only on the available resources.
- The maximum number of virtual nodes in a single database is 1024.
## Validity of Table (Column) Names
### TDengine table (column) naming rules:
Names may only contain letters, digits and underscores, must not start with a digit, are limited to 192 bytes, and are case-insensitive. The table name here does not include the database name prefix and the separator.
### Rules for escaped table (column) names:
To support a wider range of table names, TDengine introduces the escape character "`", which avoids conflicts with keywords and bypasses the validity checks above; the escape characters are not counted in the name length.
Escaped table (column) names are still subject to the length limits, and the escape characters are not counted. With the escape characters, the content inside them is no longer converted to a uniform case.

View File

@ -0,0 +1,154 @@
---
sidebar_label: Cluster Management
title: Cluster Management
---
The physical entities that make up a TDengine cluster are dnodes (data nodes), each of which is a process running on an operating system. A dnode can host vnodes (virtual nodes) responsible for storing time-series data; in a multi-node cluster, when a database's replica is 3, every vgroup of that database consists of 3 vnodes, and when replica is 1, every vgroup consists of 1 vnode. To configure a database with multiple replicas, the cluster must contain at least 3 dnodes. A dnode can also host an mnode (management node); a single cluster can have at most three mnodes. To support separation of compute and storage, TDengine 3.0.0.0 introduces a new logical node, the qnode (query node); qnodes and vnodes can either coexist in one dnode or be fully separated onto different dnodes.
## Create a Data Node
```sql
CREATE DNODE {dnode_endpoint | dnode_host_name PORT port_val}
```
`dnode_endpoint` has the form `hostname:port`. The host name and port can also be given separately.
In practice it is recommended to create the dnode first and then start the corresponding dnode process, so that the dnode can immediately join the cluster based on the firstEP in its configuration file. Each dnode is assigned an ID after it joins successfully.
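A sketch with an illustrative endpoint, using either of the two forms above:

```sql
CREATE DNODE "node2.example.com:6030";        -- full endpoint form
CREATE DNODE "node2.example.com" PORT 6030;   -- host name plus port form (equivalent)
```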
## View Data Nodes
```sql
SHOW DNODES;
```
Lists all data nodes in the cluster, showing each dnode's ID, endpoint and status.
## Drop a Data Node
```sql
DROP DNODE {dnode_id | dnode_endpoint}
```
A dnode can be removed from the cluster either by dnode_id or by endpoint. Note that dropping a dnode is not the same as stopping its process; in practice it is recommended to drop a dnode first and then stop the corresponding process.
## Modify Data Node Configuration
```sql
ALTER DNODE dnode_id dnode_option
ALTER ALL DNODES dnode_option
dnode_option: {
'resetLog'
| 'balance' value
| 'monitor' value
| 'debugFlag' value
| 'monDebugFlag' value
| 'vDebugFlag' value
| 'mDebugFlag' value
| 'cDebugFlag' value
| 'httpDebugFlag' value
| 'qDebugflag' value
| 'sdbDebugFlag' value
| 'uDebugFlag' value
| 'tsdbDebugFlag' value
| 'sDebugflag' value
| 'rpcDebugFlag' value
| 'dDebugFlag' value
| 'mqttDebugFlag' value
| 'wDebugFlag' value
| 'tmrDebugFlag' value
| 'cqDebugFlag' value
}
```
The configurable options above are configured the same way as in the dnode configuration file; the difference is that the change is dynamic, takes effect immediately, and does not require restarting the dnode.
## Add a Management Node
```sql
CREATE MNODE ON DNODE dnode_id
```
On startup the system creates one MNODE on the firstEP node by default. Users can create additional MNODEs with this statement to improve system availability. A cluster can have at most three MNODEs, and at most one MNODE can be created on a DNODE.
## View Management Nodes
```sql
SHOW MNODES;
```
Lists all management nodes in the cluster, including their IDs, the DNODEs they reside on, and their status.
## Drop a Management Node
```sql
DROP MNODE ON DNODE dnode_id;
```
Drops the MNODE on the DNODE specified by dnode_id.
## Create a Query Node
```sql
CREATE QNODE ON DNODE dnode_id;
```
On startup the system has no QNODEs by default; users can create QNODEs to separate compute from storage. At most one QNODE can be created on a DNODE. If a DNODE's `supportVnodes` parameter is non-zero and a QNODE is created on it, that dnode contains both vnodes responsible for storage management and a qnode responsible for query computation; if an mnode is also created on it, up to three kinds of logical nodes can coexist on one dnode. They can also be fully separated by configuration: setting a dnode's `supportVnodes` to 0 and creating either an mnode or a qnode on it achieves complete physical separation of the three kinds of logical nodes.
## View Query Nodes
```sql
SHOW QNODES;
```
Lists all query nodes in the cluster, including their IDs and the DNODEs they reside on.
## Drop a Query Node
```sql
DROP QNODE ON DNODE dnode_id;
```
Drops the QNODE on the DNODE whose ID is dnode_id; this does not affect the status of that dnode.
## Modify Client Configuration
If the client is also regarded as part of the cluster in a broad sense, client configuration parameters can be changed dynamically with the following command.
```sql
ALTER LOCAL local_option
local_option: {
'resetLog'
| 'rpcDebugFlag' value
| 'tmrDebugFlag' value
| 'cDebugFlag' value
| 'uDebugFlag' value
| 'debugFlag' value
}
```
The parameters above are used the same way as when configuring the client in the configuration file, but the client does not need to be restarted; the change takes effect immediately.
## View Client Configuration
```sql
SHOW LOCAL VARIABLES;
```
## Merge vgroups
```sql
MERGE VGROUP vgroup_no1 vgroup_no2;
```
If, after the system has been running for a while, data and load become unevenly distributed among vgroups because different time lines have different data characteristics, load balancing can be achieved step by step by merging or splitting vgroups.
## Split a vgroup
```sql
SPLIT VGROUP vgroup_no;
```
Creates a new vgroup and migrates part of the data of the specified vgroup to the new vgroup according to consistent hashing. During this process the original vgroup continues to serve reads and writes normally. A sketch of typical usage follows.
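A sketch of typical usage (the vgroup IDs are illustrative and would be taken from the output of `SHOW VGROUPS` for the current database, assuming that statement is available):

```sql
SHOW VGROUPS;        -- inspect the current vgroup distribution
MERGE VGROUP 3 4;    -- merge two lightly loaded vgroups
SPLIT VGROUP 5;      -- split an overloaded vgroup
```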

View File

@ -3,22 +3,15 @@ sidebar_label: Metadata Database
title: Metadata Database
---
TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata and to system information and status, such as database and table names and the SQL statements currently being executed. It stores information about all other databases maintained by TDengine and contains several read-only tables. These tables are actually views rather than base tables, so there are no files associated with them; they can only be queried, and write operations such as INSERT are not allowed. `INFORMATION_SCHEMA` is intended to provide, in a more consistent way, access to the information exposed by the various SHOW statements supported by TDengine (such as SHOW TABLES and SHOW DATABASES). Compared with SHOW statements, SELECT ... FROM INFORMATION_SCHEMA.tablename has the following advantages:
1. INFORMATION_SCHEMA can be set as the default database with the USE statement.
2. The familiar SELECT syntax can be used; only some table and column names need to be learned.
3. Query results can be filtered, sorted and so on; in fact, any SELECT statement supported by TDengine can be used on the tables in INFORMATION_SCHEMA.
4. TDengine can add columns to existing INFORMATION_SCHEMA tables in future releases without affecting existing business systems.
5. It is more interoperable with other database systems; for example, Oracle users are familiar with querying the tables of the Oracle data dictionary.
Note: since SHOW statements are already familiar to and widely used by developers, they remain available.
This chapter describes the tables and table structures of the built-in metadata database `INFORMATION_SCHEMA` in detail.
@ -87,8 +80,8 @@ TODO
| 9 | buffer | INT | Size of the write cache memory block of each vnode, in MB |
| 10 | minrows | INT | Minimum number of records in a file block |
| 11 | maxrows | INT | Maximum number of records in a file block |
| 12 | wallevel | INT | WAL level |
| 13 | fsync | INT | Data flush (fsync) period |
| 12 | wal_level | INT | WAL level |
| 13 | walfsync_period | INT | Data flush (fsync) period |
| 14 | comp | INT | Data compression method |
| 15 | precision | BINARY(2) | Time resolution (precision) |
| 16 | status | BINARY(10) | Database status |
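For example, the WAL-related columns above can be read with an ordinary query instead of SHOW DATABASES (a sketch; it assumes the databases view is named `INS_DATABASES` and that its first column is `name`):

```sql
SELECT name, wal_level, walfsync_period FROM information_schema.ins_databases;
```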

View File

@ -23,6 +23,23 @@ password: at most 128 bytes; valid characters include "a-zA-Z0-9!?$%^&*()_+={[}]:;@~
DROP USER user_name;
```
## Modify User Information
```sql
ALTER USER user_name alter_user_clause
alter_user_clause: {
PASS 'literal'
| ENABLE value
| SYSINFO value
}
```
- PASS: change the user's password.
- ENABLE: enable or disable the user. 1 enables the user, 0 disables the user.
- SYSINFO: whether the user may view system information. 1 means the user can view system information, 0 means the user cannot. (See the sketch below.)
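A sketch of the options above (the user name and password are illustrative):

```sql
ALTER USER test_user PASS 'xQ3#secret';  -- change the password
ALTER USER test_user ENABLE 0;           -- disable the account
```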
## Grant Privileges
```sql

View File

@ -0,0 +1,28 @@
---
sidebar_label: User-Defined Functions
title: User-Defined Functions
---
Besides the built-in functions of TDengine, users can implement their own function logic and add it to the TDengine system.
## Create a Function
```sql
CREATE [AGGREGATE] FUNCTION func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value]
```
Syntax description:
AGGREGATE: indicates whether the function is a scalar function or an aggregate function.
func_name: the function name; it must match the actual name of udfNormalFunc in the function implementation.
library_path: absolute path of the dynamic library that contains the UDF implementation; this is an absolute path on the client-side host.
OUTPUTTYPE: the return type of the function.
BUFSIZE: buffer size of intermediate results, in bytes. Defaults to 0 if not set; must not exceed 512 bytes.
For how to develop user-defined functions, see the [UDF guide](../develop/udf). A sketch of the statement is shown below.
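A minimal sketch (the function names and library paths are hypothetical; they must match the actual UDF implementation deployed on the client host):

```sql
CREATE FUNCTION bit_and AS '/usr/local/taos/udf/libbitand.so' OUTPUTTYPE INT;
CREATE AGGREGATE FUNCTION l2norm AS '/usr/local/taos/udf/libl2norm.so' OUTPUTTYPE DOUBLE BUFSIZE 64;
```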
## Drop a User-Defined Function
```sql
DROP FUNCTION func_name
```

View File

@ -0,0 +1,47 @@
---
sidebar_label: Indexes
title: Using Indexes
---
Starting with version 3.0.0.0, TDengine supports indexes: SMA indexes and FULLTEXT indexes.
## Create an Index
```sql
CREATE FULLTEXT INDEX index_name ON tb_name (col_name [, col_name] ...)
CREATE SMA INDEX index_name ON tb_name index_option
index_option:
FUNCTION(functions) INTERVAL(interval_val [, interval_offset]) [SLIDING(sliding_val)] [WATERMARK(watermark_val)] [MAX_DELAY(max_delay_val)]
functions:
function [, function] ...
```
### SMA Index
Creates pre-aggregations on the specified columns over the time windows defined by the INTERVAL clause; the pre-aggregation functions are given by functions_string. An SMA index improves the performance of aggregate queries over the given time ranges. Currently at most one SMA INDEX can be created per supertable. See the sketch after this list.
- Supported functions are MAX, MIN and SUM.
- WATERMARK: smallest unit is milliseconds, range [0ms, 900000ms], default 5 seconds; only usable on supertables.
- MAX_DELAY: smallest unit is milliseconds, range [1ms, 900000ms]; the default is the INTERVAL value (capped at the maximum); only usable on supertables. Note: do not set MAX_DELAY too small, otherwise results are pushed too frequently and storage and query performance suffer; keep the default unless there is a specific need.
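A sketch of an SMA index, as referenced above (the supertable and column names are illustrative):

```sql
CREATE SMA INDEX sma_idx_current ON meters
  FUNCTION(MAX(current), MIN(current), SUM(current)) INTERVAL(1h) SLIDING(30m);
```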
### FULLTEXT Index
Building a text index on the specified columns improves queries that filter on text. FULLTEXT indexes do not support the index_option syntax. At this stage FULLTEXT indexes can only be created on tag columns of JSON type. Composite indexes over multiple columns are not supported, but a FULLTEXT index can be created separately for each column.
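A sketch of a FULLTEXT index, assuming `info` is a JSON-type tag column of the illustrative supertable `meters`:

```sql
CREATE FULLTEXT INDEX ft_idx_info ON meters (info);
```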
## Drop an Index
```sql
DROP INDEX index_name;
```
## View Indexes
```sql
SHOW INDEXES FROM tbl_name [FROM db_name];
```
Shows the indexes that have been created on the specified database or table.

View File

@ -0,0 +1,38 @@
---
sidebar_label: Error Recovery
title: Error Recovery
---
In a complex application scenario, connections or query tasks may end up in an error state or run for too long without finishing; in such cases a way to terminate those connections or tasks is needed.
## Terminate a Connection
```sql
KILL CONNECTION conn_id;
```
conn_id can be obtained with `SHOW CONNECTIONS`.
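A sketch of the sequence described above (the connection ID 1 is illustrative):

```sql
SHOW CONNECTIONS;     -- find the conn_id of the problematic connection
KILL CONNECTION 1;    -- terminate it
```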
## Terminate a Query
```sql
KILL QUERY query_id;
```
query_id can be obtained with `SHOW QUERIES`.
## Terminate a Transaction
```sql
KILL TRANSACTION trans_id
```
trans_id can be obtained with `SHOW TRANSACTIONS`.
## Reset the Client Cache
```sql
RESET QUERY CACHE;
```
If metadata becomes out of sync across multiple clients, this command forcibly clears the client cache, after which the client fetches the latest metadata from the server.

View File

@ -1,42 +0,0 @@
---
title: User Management
---
A system administrator can add and delete users and change passwords from the CLI. The SQL syntax in the CLI is as follows:
```sql
CREATE USER <user_name> PASS <'password'>;
```
Creates a user with the given user name and password; the password must be enclosed in single quotes (ASCII single quotes).
```sql
DROP USER <user_name>;
```
Deletes a user; root only.
```sql
ALTER USER <user_name> PASS <'password'>;
```
Changes a user's password. To avoid it being converted to lower case, enclose the password in single quotes (ASCII single quotes).
```sql
ALTER USER <user_name> PRIVILEGE <write|read>;
```
Changes a user's privilege to write or read; no quotes are needed.
Note: there are three privilege levels, super/write/read, but the super privilege cannot currently be granted to a user via the ALTER statement.
```sql
SHOW USERS;
```
Shows all users.
:::note
In the SQL syntax, < > marks the parts to be entered by the user; do not enter < > themselves.
:::

View File

@ -1,53 +0,0 @@
---
title: Managing Connections, Queries and Continuous Queries
---
A system administrator can inspect the system's connections, running queries and stream (continuous) computations from the CLI, and can close connections and stop running queries and stream computations.
## Show Database Connections
```sql
SHOW CONNECTIONS;
```
One column of the result shows ip:port, the IP address and port of the connection.
## Forcibly Close a Database Connection
```sql
KILL CONNECTION <connection-id>;
```
Here connection-id is the number in the first column shown by SHOW CONNECTIONS.
## Show Data Queries
```sql
SHOW QUERIES;
```
The two colon-separated numbers in the first column form the query-id: the connection-id of the connection that issued the query, and the query sequence number.
## Forcibly Stop a Data Query
```sql
KILL QUERY <query-id>;
```
Here query-id is the connection-id:query-no string shown by SHOW QUERIES, e.g. "105:2"; it can be copied and pasted.
## Show Continuous Queries
```sql
SHOW STREAMS;
```
The two colon-separated numbers in the first column form the stream-id: the connection-id of the connection that started the stream, and the stream sequence number.
## Forcibly Stop a Continuous Query
```sql
KILL STREAM <stream-id>;
```
Here stream-id is the connection-id:stream-no string shown by SHOW STREAMS, e.g. 103:2; it can be copied and pasted.

View File

@ -0,0 +1,4 @@
---
sidebar_label: taosX
title: Using taosX to Replicate Data Between Clusters
---

View File

@ -1 +1 @@
label: Reference Guide
label: Reference Manual

View File

@ -1,8 +1,8 @@
---
title: Reference Guide
title: Reference Manual
---
The reference guide is the most detailed description of TDengine itself, the TDengine connectors for each language, and the bundled tools.
The reference manual is the most detailed description of TDengine itself, the TDengine connectors for each language, and the bundled tools.
```mdx-code-block
import DocCardList from '@theme/DocCardList';

View File

@ -5,33 +5,29 @@ title: Capacity Planning
When TDengine is used to build an IoT big-data platform, compute and storage resources must be planned according to the business scenario. The following discusses the memory, CPU and disk space the system needs.
## Memory Requirements
## Server-Side Memory Requirements
Each database can create a fixed number of vgroups, by default equal to the number of CPU cores and configurable with maxVgroupsPerDb. Each replica in a vgroup is a vnode, and each vnode takes a fixed amount of memory (determined by the database parameters blocks and cache); each table takes memory related to the total tag length; in addition the system has some fixed overhead. The memory needed by each DB can therefore be calculated as follows:
Each database can create a fixed number of vgroups, 2 by default, which can be specified with the `vgroups <num>` parameter when the database is created; the number of replicas is given by the `replica <num>` parameter. Each replica in a vgroup is a vnode, so the memory used by a database is determined by the following parameters:
- vgroups
- replica
- buffer
- pages
- pagesize
- cachesize
For a detailed description of these parameters see [Database Management](../taos-sql/database).
The memory required by one database equals:
```
Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
vgroups * replica * (buffer + pages * pagesize + cachesize)
```
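Using the new formula with the defaults shown above (vgroups = 2, replica = 1, buffer = 96 MB, pages = 256, pagesize = 4 KB, cachesize = 1 MB), a rough sketch of the estimate for a single database is 2 × 1 × (96 MB + 256 × 4 KB + 1 MB) ≈ 196 MB.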
Example: assume maxVgroupsPerDb is the default 64, cache is the default 16 MB, blocks is the default 6, and a DB has 100,000 tables with a single replica and a total tag length of 256 bytes; the total memory requirement of this DB is then: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792 MB.
Note that this memory does not have to be supplied by a single server; it is shared by all data nodes in the cluster, i.e. by the servers hosting those data nodes. If the cluster has more than one database, the required memory is accumulated and shared by all servers in the cluster. In the more complex case where data nodes are not all created up front but added gradually as the load grows, newly created databases cause an uneven load between old and new data nodes; the simple theoretical calculation then cannot be used directly, and the actual load of each data node must be taken into account.
In day-to-day operations we usually care more about the memory used by the TDengine server process (taosd):
```
total taosd memory = vnode memory + mnode memory + query memory
```
Where:
1. "vnode memory" is the share of the memory of all databases in the cluster that falls on the current taosd node. It can be estimated by summing the per-DB memory from the "Database Memory Size" formula above and averaging over the total number of TDengine nodes in the cluster (multiplied by the replica count if replication is configured).
2. "mnode memory" is the memory used by the management node. If a taosd node hosts an mnode, add "0.2 KB \* total number of tables in the cluster".
3. "query memory" is the memory needed on the server side to process query requests. A single query uses at least "0.2 KB \* number of tables involved in the query".
Note: the estimation above describes the required memory, not an upper bound on total memory. In a production environment, because of OS caching, resource scheduling and so on, the memory plan should keep some headroom above the estimate to keep system state and performance stable. Production systems usually also deploy resource monitoring to detect hardware shortages in time.
Finally, if memory is plentiful, consider increasing the Blocks setting so that more data is kept in memory, improving write and query speed.
### Client-Side Memory Requirements
## Client-Side Memory Requirements
Client applications connect to the server through the taosc client driver, which also has memory overhead.
@ -77,5 +73,3 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
From the memory, CPU and storage estimates above you can determine how many cores, how much memory and how much storage the whole system needs. If the number of data replicas is not 1, the total requirement must be multiplied by the number of replicas.
Because TDengine scales out very well, you can easily decide how many physical or virtual machines to purchase from the total requirement and the resources of a single machine.
**To calculate the CPU, memory and storage requirements immediately, see [Resource Estimation](https://www.taosdata.com/config/config.html).**

View File

@ -10,19 +10,21 @@ When TDengine receives a request packet from an application, it first writes the raw request packet
Two system configuration parameters are involved:
- walLevel: WAL level. 0: do not write WAL; 1: write WAL but do not execute fsync; 2: write WAL and execute fsync.
- fsync: when walLevel is set to 2, the period at which fsync is executed. 0 means fsync is executed immediately on every write.
- wal_level: WAL level. 1: write WAL but do not execute fsync; 2: write WAL and execute fsync. Default: 1.
- wal_fsync_period: when wal_level is set to 2, the period at which fsync is executed. 0 means fsync is executed immediately on every write.
To guarantee 100% that no data is lost, set walLevel to 2 and fsync to 0. Write speed will drop, but if the application starts enough writer threads (more than 50), write performance is still good, only about 30% lower than with fsync set to 3000 milliseconds.
To guarantee 100% that no data is lost, set wal_level to 2 and fsync to 0. Write speed will drop, but if the application starts enough writer threads (more than 50), write performance is still good, only about 30% lower than with fsync set to 3000 milliseconds.
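For example (the database name is illustrative), the corresponding per-database options in TDengine 3.0 can be set when the database is created:

```sql
CREATE DATABASE power WAL_LEVEL 2 WAL_FSYNC_PERIOD 0;
```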
## Disaster Recovery
A TDengine cluster provides high availability through multiple replicas, which also gives disaster-recovery capability.
A TDengine cluster provides high availability through multiple replicas, and at the same time offers a certain degree of disaster-recovery capability.
A TDengine cluster is managed by mnodes. To make the mnode highly reliable, multiple mnode replicas can be configured; the number is set by the system parameter numOfMnodes and should be greater than 1 for high reliability. To guarantee strong consistency of metadata, mnode replicas replicate data synchronously.
A TDengine cluster is managed by mnodes. To make the mnode highly reliable, three mnode replicas can be configured. To guarantee strong consistency of metadata, mnode replicas replicate data synchronously.
The replica count of time-series data in a TDengine cluster is bound to the database; a cluster can contain multiple databases, each with its own replica count, specified with the replica parameter at creation time. For high reliability, set the replica count to a value greater than 1.
The replica count of time-series data in a TDengine cluster is bound to the database; a cluster can contain multiple databases, each with its own replica count, specified with the replica parameter at creation time. For high reliability, set the replica count to 3.
The number of nodes in a TDengine cluster must be greater than or equal to the replica count, otherwise creating tables will fail.
When the nodes of a TDengine cluster are deployed on different physical machines and multiple replicas are configured, the system is highly reliable without any additional software or tools. TDengine Enterprise can additionally place replicas in different data centers for geo-redundancy.
Another disaster-recovery approach is to use `taosX` to replicate the data of one TDengine cluster to another TDengine cluster located in a physically different data center. See the [taosX reference](../reference/taosX) for details.

View File

@ -29,7 +29,7 @@ static void msg_process(TAOS_RES* msg) {
printf("vg: %d\n", tmq_get_vgroup_id(msg));
if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
tmq_raw_data raw = {0};
int32_t code = tmq_get_raw_meta(msg, &raw);
int32_t code = tmq_get_raw(msg, &raw);
if (code == 0) {
TAOS* pConn = taos_connect("192.168.1.86", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
@ -50,7 +50,7 @@ static void msg_process(TAOS_RES* msg) {
}
taos_free_result(pRes);
int32_t ret = taos_write_raw_meta(pConn, raw);
int32_t ret = tmq_write_raw(pConn, raw);
printf("write raw data: %s\n", tmq_err2str(ret));
taos_close(pConn);
}

View File

@ -49,18 +49,25 @@ static void msg_process(TAOS_RES* msg) {
printf("meta result: %s\n", result);
}
tmq_free_json_meta(result);
tmq_raw_data raw = {0};
tmq_get_raw_meta(msg, &raw);
int32_t ret = taos_write_raw_meta(pConn, raw);
printf("write raw meta: %s\n", tmq_err2str(ret));
}
if(tmq_get_res_type(msg) == TMQ_RES_DATA){
int32_t ret =taos_write_raw_data(pConn, msg);
printf("write raw data: %s\n", tmq_err2str(ret));
}
tmq_raw_data raw = {0};
tmq_get_raw(msg, &raw);
int32_t ret = tmq_write_raw(pConn, raw);
printf("write raw data: %s\n", tmq_err2str(ret));
// else{
// while(1){
// int numOfRows = 0;
// void *pData = NULL;
// taos_fetch_raw_block(msg, &numOfRows, &pData);
// if(numOfRows == 0) break;
// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
// printf("write raw data: %s\n", tmq_err2str(ret));
// }
// }
taos_close(pConn);
}
@ -121,7 +128,7 @@ int32_t init_env() {
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct0 values(now, 1, 2, 'a')");
pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
return -1;
@ -142,7 +149,7 @@ int32_t init_env() {
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct1 values(now, 3, 4, 'b')");
pRes = taos_query(pConn, "insert into ct1 values(1626006833600, 3, 4, 'b')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
return -1;
@ -156,7 +163,7 @@ int32_t init_env() {
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct3 values(now, 5, 6, 'c') ct1 values(now+1s, 2, 3, 'sds') (now+2s, 4, 5, 'ddd') ct0 values(now+1s, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
@ -177,7 +184,14 @@ int32_t init_env() {
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct3 values(now+7s, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (now+9s, 51, 62, 'c333', 940)");
pRes = taos_query(pConn, "insert into ct3 values(1626006833605, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (1626006833609, 51, 62, 'c333', 940)");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct3 select * from ct1");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
@ -198,19 +212,26 @@ int32_t init_env() {
}
taos_free_result(pRes);
// pRes = taos_query(pConn, "drop table ct3 ct1");
// if (taos_errno(pRes) != 0) {
// printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
// return -1;
// }
// taos_free_result(pRes);
//
// pRes = taos_query(pConn, "drop table st1");
// if (taos_errno(pRes) != 0) {
// printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
// return -1;
// }
// taos_free_result(pRes);
pRes = taos_query(pConn, "delete from abc1 .ct3 where ts < 1626006833606");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "drop table ct3 ct1");
if (taos_errno(pRes) != 0) {
printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "drop table st1");
if (taos_errno(pRes) != 0) {
printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
if (taos_errno(pRes) != 0) {
@ -261,12 +282,12 @@ int32_t init_env() {
}
taos_free_result(pRes);
// pRes = taos_query(pConn, "drop table n1");
// if (taos_errno(pRes) != 0) {
// printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
// return -1;
// }
// taos_free_result(pRes);
pRes = taos_query(pConn, "drop table n1");
if (taos_errno(pRes) != 0) {
printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
if (taos_errno(pRes) != 0) {
@ -289,21 +310,21 @@ int32_t init_env() {
}
taos_free_result(pRes);
// pRes = taos_query(pConn,
// "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
// "nchar(8), t4 bool)");
// if (taos_errno(pRes) != 0) {
// printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
// return -1;
// }
// taos_free_result(pRes);
//
// pRes = taos_query(pConn, "drop table st1");
// if (taos_errno(pRes) != 0) {
// printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
// return -1;
// }
// taos_free_result(pRes);
pRes = taos_query(pConn,
"create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
"nchar(8), t4 bool)");
if (taos_errno(pRes) != 0) {
printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "drop table st1");
if (taos_errno(pRes) != 0) {
printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
taos_close(pConn);
return 0;

View File

@ -260,17 +260,20 @@ enum tmq_res_t {
};
typedef struct tmq_raw_data{
void* raw_meta;
uint32_t raw_meta_len;
uint16_t raw_meta_type;
void* raw;
uint32_t raw_len;
uint16_t raw_type;
} tmq_raw_data;
typedef enum tmq_res_t tmq_res_t;
DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_raw_meta(TAOS_RES *res, tmq_raw_data *raw_meta);
DLL_EXPORT int32_t taos_write_raw_meta(TAOS *taos, tmq_raw_data raw_meta);
DLL_EXPORT int32_t taos_write_raw_data(TAOS *taos, TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char* tbname);
DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);
DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); // Returning null means error. Returned result need to be freed by tmq_free_json_meta
DLL_EXPORT void tmq_free_json_meta(char* jsonMeta);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);

View File

@ -40,6 +40,7 @@ enum {
|| x == TDMT_VND_CREATE_TABLE \
|| x == TDMT_VND_ALTER_TABLE \
|| x == TDMT_VND_DROP_TABLE \
|| x == TDMT_VND_DELETE \
)
// clang-format on

View File

@ -3044,11 +3044,41 @@ typedef struct SDeleteRes {
int64_t skey;
int64_t ekey;
int64_t affectedRows;
char tableFName[TSDB_TABLE_NAME_LEN];
char tsColName[TSDB_COL_NAME_LEN];
} SDeleteRes;
int32_t tEncodeDeleteRes(SEncoder* pCoder, const SDeleteRes* pRes);
int32_t tDecodeDeleteRes(SDecoder* pCoder, SDeleteRes* pRes);
typedef struct {
int32_t msgType;
int32_t msgLen;
void* msg;
} SBatchMsg;
typedef struct {
SMsgHead header;
int32_t msgNum;
SBatchMsg msg[];
} SBatchReq;
typedef struct {
int32_t reqType;
int32_t msgLen;
int32_t rspCode;
void* msg;
} SBatchRsp;
static FORCE_INLINE void tFreeSBatchRsp(void *p) {
if (NULL == p) {
return;
}
SBatchRsp* pRsp = (SBatchRsp*)p;
taosMemoryFree(pRsp->msg);
}
#pragma pack(pop)
#ifdef __cplusplus

View File

@ -136,6 +136,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_DROP_INDEX, "drop-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GET_INDEX, "get-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GET_TABLE_INDEX, "get-table-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_BATCH_META, "batch-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TABLE_CFG, "table-cfg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_TOPIC, "create-topic", SMCreateTopicReq, SMCreateTopicRsp)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_TOPIC, "alter-topic", NULL, NULL)
@ -180,6 +181,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_TABLE_META, "vnode-table-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TABLES_META, "vnode-tables-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TABLE_CFG, "vnode-table-cfg", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_BATCH_META, "vnode-batch-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_STB, "vnode-create-stb", SVCreateStbReq, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_STB, "vnode-alter-stb", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_STB, "vnode-drop-stb", SVDropStbReq, NULL)

View File

@ -84,20 +84,20 @@
#define TK_COMP 66
#define TK_DURATION 67
#define TK_NK_VARIABLE 68
#define TK_FSYNC 69
#define TK_MAXROWS 70
#define TK_MINROWS 71
#define TK_KEEP 72
#define TK_PAGES 73
#define TK_PAGESIZE 74
#define TK_PRECISION 75
#define TK_REPLICA 76
#define TK_STRICT 77
#define TK_WAL 78
#define TK_VGROUPS 79
#define TK_SINGLE_STABLE 80
#define TK_RETENTIONS 81
#define TK_SCHEMALESS 82
#define TK_MAXROWS 69
#define TK_MINROWS 70
#define TK_KEEP 71
#define TK_PAGES 72
#define TK_PAGESIZE 73
#define TK_PRECISION 74
#define TK_REPLICA 75
#define TK_STRICT 76
#define TK_VGROUPS 77
#define TK_SINGLE_STABLE 78
#define TK_RETENTIONS 79
#define TK_SCHEMALESS 80
#define TK_WAL_LEVEL 81
#define TK_WAL_FSYNC_PERIOD 82
#define TK_WAL_RETENTION_PERIOD 83
#define TK_WAL_RETENTION_SIZE 84
#define TK_WAL_ROLL_PERIOD 85
@ -188,93 +188,90 @@
#define TK_NK_BOOL 170
#define TK_RATIO 171
#define TK_NK_FLOAT 172
#define TK_COMPACT 173
#define TK_VNODES 174
#define TK_IN 175
#define TK_OUTPUTTYPE 176
#define TK_AGGREGATE 177
#define TK_BUFSIZE 178
#define TK_STREAM 179
#define TK_INTO 180
#define TK_TRIGGER 181
#define TK_AT_ONCE 182
#define TK_WINDOW_CLOSE 183
#define TK_IGNORE 184
#define TK_EXPIRED 185
#define TK_KILL 186
#define TK_CONNECTION 187
#define TK_TRANSACTION 188
#define TK_BALANCE 189
#define TK_VGROUP 190
#define TK_MERGE 191
#define TK_REDISTRIBUTE 192
#define TK_SPLIT 193
#define TK_SYNCDB 194
#define TK_DELETE 195
#define TK_INSERT 196
#define TK_NULL 197
#define TK_NK_QUESTION 198
#define TK_NK_ARROW 199
#define TK_ROWTS 200
#define TK_TBNAME 201
#define TK_QSTART 202
#define TK_QEND 203
#define TK_QDURATION 204
#define TK_WSTART 205
#define TK_WEND 206
#define TK_WDURATION 207
#define TK_CAST 208
#define TK_NOW 209
#define TK_TODAY 210
#define TK_TIMEZONE 211
#define TK_CLIENT_VERSION 212
#define TK_SERVER_VERSION 213
#define TK_SERVER_STATUS 214
#define TK_CURRENT_USER 215
#define TK_COUNT 216
#define TK_LAST_ROW 217
#define TK_BETWEEN 218
#define TK_IS 219
#define TK_NK_LT 220
#define TK_NK_GT 221
#define TK_NK_LE 222
#define TK_NK_GE 223
#define TK_NK_NE 224
#define TK_MATCH 225
#define TK_NMATCH 226
#define TK_CONTAINS 227
#define TK_JOIN 228
#define TK_INNER 229
#define TK_SELECT 230
#define TK_DISTINCT 231
#define TK_WHERE 232
#define TK_PARTITION 233
#define TK_BY 234
#define TK_SESSION 235
#define TK_STATE_WINDOW 236
#define TK_SLIDING 237
#define TK_FILL 238
#define TK_VALUE 239
#define TK_NONE 240
#define TK_PREV 241
#define TK_LINEAR 242
#define TK_NEXT 243
#define TK_HAVING 244
#define TK_RANGE 245
#define TK_EVERY 246
#define TK_ORDER 247
#define TK_SLIMIT 248
#define TK_SOFFSET 249
#define TK_LIMIT 250
#define TK_OFFSET 251
#define TK_ASC 252
#define TK_NULLS 253
#define TK_ID 254
#define TK_NK_BITNOT 255
#define TK_VALUES 256
#define TK_IMPORT 257
#define TK_NK_SEMI 258
#define TK_FILE 259
#define TK_OUTPUTTYPE 173
#define TK_AGGREGATE 174
#define TK_BUFSIZE 175
#define TK_STREAM 176
#define TK_INTO 177
#define TK_TRIGGER 178
#define TK_AT_ONCE 179
#define TK_WINDOW_CLOSE 180
#define TK_IGNORE 181
#define TK_EXPIRED 182
#define TK_KILL 183
#define TK_CONNECTION 184
#define TK_TRANSACTION 185
#define TK_BALANCE 186
#define TK_VGROUP 187
#define TK_MERGE 188
#define TK_REDISTRIBUTE 189
#define TK_SPLIT 190
#define TK_DELETE 191
#define TK_INSERT 192
#define TK_NULL 193
#define TK_NK_QUESTION 194
#define TK_NK_ARROW 195
#define TK_ROWTS 196
#define TK_TBNAME 197
#define TK_QSTART 198
#define TK_QEND 199
#define TK_QDURATION 200
#define TK_WSTART 201
#define TK_WEND 202
#define TK_WDURATION 203
#define TK_CAST 204
#define TK_NOW 205
#define TK_TODAY 206
#define TK_TIMEZONE 207
#define TK_CLIENT_VERSION 208
#define TK_SERVER_VERSION 209
#define TK_SERVER_STATUS 210
#define TK_CURRENT_USER 211
#define TK_COUNT 212
#define TK_LAST_ROW 213
#define TK_BETWEEN 214
#define TK_IS 215
#define TK_NK_LT 216
#define TK_NK_GT 217
#define TK_NK_LE 218
#define TK_NK_GE 219
#define TK_NK_NE 220
#define TK_MATCH 221
#define TK_NMATCH 222
#define TK_CONTAINS 223
#define TK_IN 224
#define TK_JOIN 225
#define TK_INNER 226
#define TK_SELECT 227
#define TK_DISTINCT 228
#define TK_WHERE 229
#define TK_PARTITION 230
#define TK_BY 231
#define TK_SESSION 232
#define TK_STATE_WINDOW 233
#define TK_SLIDING 234
#define TK_FILL 235
#define TK_VALUE 236
#define TK_NONE 237
#define TK_PREV 238
#define TK_LINEAR 239
#define TK_NEXT 240
#define TK_HAVING 241
#define TK_RANGE 242
#define TK_EVERY 243
#define TK_ORDER 244
#define TK_SLIMIT 245
#define TK_SOFFSET 246
#define TK_LIMIT 247
#define TK_OFFSET 248
#define TK_ASC 249
#define TK_NULLS 250
#define TK_ID 251
#define TK_NK_BITNOT 252
#define TK_VALUES 253
#define TK_IMPORT 254
#define TK_NK_SEMI 255
#define TK_FILE 256
#define TK_NK_SPACE 300
#define TK_NK_COMMENT 301

View File

@ -38,6 +38,8 @@ typedef struct SDeleterRes {
int64_t skey;
int64_t ekey;
int64_t affectedRows;
char tableName[TSDB_TABLE_NAME_LEN];
char tsColName[TSDB_COL_NAME_LEN];
} SDeleterRes;
typedef struct SDeleterParam {

View File

@ -64,17 +64,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers);
* @param SReadHandle
* @return
*/
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols,
SSchemaWrapper** pSchema);
/**
* Set the input data block for the stream scan.
* @param tinfo
* @param input
* @param type
* @return
*/
int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid);
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchema);
/**
* Set multiple input data blocks for the stream scan.
@ -84,7 +74,7 @@ int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool
* @param type
* @return
*/
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid);
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type);
/**
* Update the table id list, add or remove.

View File

@ -209,6 +209,7 @@ typedef enum EFuncDataRequired {
} EFuncDataRequired;
EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
EFuncDataRequired fmFuncDynDataRequired(int32_t funcId, void* pRes, STimeWindow* pTimeWindow);
int32_t fmGetFuncExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet);
int32_t fmGetScalarFuncExecFuncs(int32_t funcId, SScalarFuncExecFuncs* pFpSet);

View File

@ -151,7 +151,8 @@ typedef struct SVnodeModifyLogicNode {
uint64_t tableId;
uint64_t stableId;
int8_t tableType; // table type
char tableFName[TSDB_TABLE_FNAME_LEN];
char tableName[TSDB_TABLE_NAME_LEN];
char tsColName[TSDB_COL_NAME_LEN];
STimeWindow deleteTimeRange;
SVgroupsInfo* pVgroupList;
SNodeList* pInsertCols;
@ -328,6 +329,7 @@ typedef struct STableScanPhysiNode {
int8_t triggerType;
int64_t watermark;
int8_t igExpired;
bool assignBlockUid;
} STableScanPhysiNode;
typedef STableScanPhysiNode STableSeqScanPhysiNode;
@ -493,7 +495,7 @@ typedef struct SQueryInserterNode {
uint64_t tableId;
uint64_t stableId;
int8_t tableType; // table type
char tableFName[TSDB_TABLE_FNAME_LEN];
char tableName[TSDB_TABLE_NAME_LEN];
int32_t vgId;
SEpSet epSet;
} SQueryInserterNode;
@ -502,7 +504,8 @@ typedef struct SDataDeleterNode {
SDataSinkNode sink;
uint64_t tableId;
int8_t tableType; // table type
char tableFName[TSDB_TABLE_FNAME_LEN];
char tableFName[TSDB_TABLE_NAME_LEN];
char tsColName[TSDB_COL_NAME_LEN];
STimeWindow deleteTimeRange;
SNode* pAffectedRows;
} SDataDeleterNode;

View File

@ -375,6 +375,7 @@ typedef struct SQuery {
int8_t precision;
SCmdMsgInfo* pCmdMsg;
int32_t msgType;
SArray* pTargetTableList;
SArray* pTableList;
SArray* pDbList;
bool showRewrite;

View File

@ -251,8 +251,8 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
(_code) == TSDB_CODE_APP_NOT_READY || (_code) == TSDB_CODE_RPC_BROKEN_LINK)
#define NEED_CLIENT_RM_TBLMETA_REQ(_type) \
((_type) == TDMT_VND_CREATE_TABLE || (_type) == TDMT_VND_CREATE_STB || (_type) == TDMT_VND_DROP_TABLE || \
(_type) == TDMT_VND_DROP_STB)
((_type) == TDMT_VND_CREATE_TABLE || (_type) == TDMT_MND_CREATE_STB || (_type) == TDMT_VND_DROP_TABLE || \
(_type) == TDMT_MND_DROP_STB)
#define NEED_SCHEDULER_REDIRECT_ERROR(_code) \
((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_NODE_NOT_DEPLOYED || \

View File

@ -171,8 +171,8 @@ typedef struct {
} STaskDispatcherFixedEp;
typedef struct {
// int8_t hashMethod;
char stbFullName[TSDB_TABLE_FNAME_LEN];
int32_t waitingRspCnt;
SUseDbRsp dbInfo;
} STaskDispatcherShuffle;
@ -270,7 +270,7 @@ typedef struct SStreamTask {
int64_t startVer;
int64_t checkpointVer;
int64_t processedVer;
int32_t numOfVgroups;
// int32_t numOfVgroups;
// children info
SArray* childEpInfo; // SArray<SStreamChildEpInfo*>

View File

@ -513,7 +513,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_ONLY_ONE_JSON_TAG TAOS_DEF_ERROR_CODE(0, 0x2633)
#define TSDB_CODE_PAR_INCORRECT_NUM_OF_COL TAOS_DEF_ERROR_CODE(0, 0x2634)
#define TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL TAOS_DEF_ERROR_CODE(0, 0x2635)
#define TSDB_CODE_PAR_INVALID_DAYS_VALUE TAOS_DEF_ERROR_CODE(0, 0x2636)
#define TSDB_CODE_PAR_OFFSET_LESS_ZERO TAOS_DEF_ERROR_CODE(0, 0x2637)
#define TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY TAOS_DEF_ERROR_CODE(0, 0x2638)
#define TSDB_CODE_PAR_INVALID_TOPIC_QUERY TAOS_DEF_ERROR_CODE(0, 0x2639)

View File

@ -222,6 +222,7 @@ typedef struct SRequestObj {
int32_t code;
SArray* dbList;
SArray* tableList;
SArray* targetTableList;
SQueryExecMetric metric;
SRequestSendRecvBody body;
bool syncQuery; // todo refactor: async query object

View File

@ -235,6 +235,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
if (TSDB_CODE_SUCCESS == code || NEED_CLIENT_HANDLE_ERROR(code)) {
TSWAP(pRequest->dbList, (*pQuery)->pDbList);
TSWAP(pRequest->tableList, (*pQuery)->pTableList);
TSWAP(pRequest->targetTableList, (*pQuery)->pTargetTableList);
}
return code;
@ -851,7 +852,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
tscDebug("schedulerExecCb request type %s", TMSG_INFO(pRequest->type));
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
removeMeta(pTscObj, pRequest->tableList);
removeMeta(pTscObj, pRequest->targetTableList);
}
// return to client
@ -1094,7 +1095,7 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida
} while (retryNum++ < REQUEST_TOTAL_EXEC_TIMES);
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
removeMeta(pRequest->pTscObj, pRequest->tableList);
removeMeta(pRequest->pTscObj, pRequest->targetTableList);
}
return pRequest;

View File

@ -687,6 +687,7 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
TSWAP(pRequest->dbList, (pQuery)->pDbList);
TSWAP(pRequest->tableList, (pQuery)->pTableList);
TSWAP(pRequest->targetTableList, (pQuery)->pTargetTableList);
destorySqlParseWrapper(pWrapper);

View File

@ -693,6 +693,7 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
TSWAP(pStmt->exec.pRequest->dbList, pStmt->sql.pQuery->pDbList);
TSWAP(pStmt->exec.pRequest->tableList, pStmt->sql.pQuery->pTableList);
TSWAP(pStmt->exec.pRequest->targetTableList, pStmt->sql.pQuery->pTargetTableList);
// if (STMT_TYPE_QUERY == pStmt->sql.queryRes) {
// STMT_ERR_RET(stmtRestoreQueryFields(pStmt));

View File

@ -1206,6 +1206,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
SDecoder decoder;
tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
} else {
ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP);
@ -1859,6 +1860,10 @@ tmq_res_t tmq_get_res_type(TAOS_RES* res) {
if (TD_RES_TMQ(res)) {
return TMQ_RES_DATA;
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DELETE) {
return TMQ_RES_DATA;
}
return TMQ_RES_TABLE_META;
} else {
return TMQ_RES_INVALID;
@ -1913,17 +1918,6 @@ const char* tmq_get_table_name(TAOS_RES* res) {
return NULL;
}
int32_t tmq_get_raw_meta(TAOS_RES* res, tmq_raw_data *raw) {
if (TD_RES_TMQ_META(res) && raw) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
raw->raw_meta = pMetaRspObj->metaRsp.metaRsp;
raw->raw_meta_len = pMetaRspObj->metaRsp.metaRspLen;
raw->raw_meta_type = pMetaRspObj->metaRsp.resMsgType;
return TSDB_CODE_SUCCESS;
}
return TSDB_CODE_INVALID_PARA;
}
static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id,
int8_t t) {
char* string = NULL;
@ -2436,30 +2430,6 @@ _exit:
return string;
}
char* tmq_get_json_meta(TAOS_RES* res) {
if (!TD_RES_TMQ_META(res)) {
return NULL;
}
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) {
return processCreateStb(&pMetaRspObj->metaRsp);
} else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_STB) {
return processAlterStb(&pMetaRspObj->metaRsp);
} else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_STB) {
return processDropSTable(&pMetaRspObj->metaRsp);
} else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_TABLE) {
return processCreateTable(&pMetaRspObj->metaRsp);
} else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_TABLE) {
return processAlterTable(&pMetaRspObj->metaRsp);
} else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_TABLE) {
return processDropTable(&pMetaRspObj->metaRsp);
}
return NULL;
}
void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); }
static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
SVCreateStbReq req = {0};
SDecoder coder;
@ -2531,6 +2501,13 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
pQuery.stableQuery = true;
launchQueryImpl(pRequest, &pQuery, true, NULL);
if(pRequest->code == TSDB_CODE_SUCCESS){
SCatalog* pCatalog = NULL;
catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
catalogRemoveTableMeta(pCatalog, &tableName);
}
code = pRequest->code;
taosMemoryFree(pCmdMsg.pMsg);
@ -2572,7 +2549,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
pReq.suid = req.suid;
STscObj* pTscObj = pRequest->pTscObj;
SName tableName;
SName tableName = {0};
tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
SCmdMsgInfo pCmdMsg = {0};
@ -2593,6 +2570,13 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
pQuery.stableQuery = true;
launchQueryImpl(pRequest, &pQuery, true, NULL);
if(pRequest->code == TSDB_CODE_SUCCESS){
SCatalog* pCatalog = NULL;
catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
catalogRemoveTableMeta(pCatalog, &tableName);
}
code = pRequest->code;
taosMemoryFree(pCmdMsg.pMsg);
@ -2659,17 +2643,20 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
// loop to create table
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pCreateReq = req.pReqs + iReq;
SVgroupInfo pInfo = {0};
SName pName;
SName pName = {0};
toName(pTscObj->acctId, pRequest->pDb, pCreateReq->name, &pName);
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
taosArrayPush(pRequest->tableList, &pName);
SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
if (pTableBatch == NULL) {
@ -2703,8 +2690,11 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
goto end;
}
launchQueryImpl(pRequest, pQuery, false, NULL);
pQuery = NULL; // no need to free in the end
launchQueryImpl(pRequest, pQuery, true, NULL);
if (pRequest->code == TSDB_CODE_SUCCESS){
removeMeta(pTscObj, pRequest->tableList);
}
code = pRequest->code;
end:
@ -2772,19 +2762,21 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
// loop to create table
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pDropReq = req.pReqs + iReq;
pDropReq->igNotExists = true;
SVgroupInfo pInfo = {0};
SName pName;
SName pName = {0};
toName(pTscObj->acctId, pRequest->pDb, pDropReq->name, &pName);
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
taosArrayPush(pRequest->tableList, &pName);
SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
if (pTableBatch == NULL) {
SVgroupDropTableBatch tBatch = {0};
@ -2815,8 +2807,10 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
goto end;
}
launchQueryImpl(pRequest, pQuery, false, NULL);
pQuery = NULL; // no need to free in the end
launchQueryImpl(pRequest, pQuery, true, NULL);
if (pRequest->code == TSDB_CODE_SUCCESS){
removeMeta(pTscObj, pRequest->tableList);
}
code = pRequest->code;
end:
@ -2827,6 +2821,70 @@ end:
return code;
}
// delete from db.tabl where .. -> delete from tabl where ..
// delete from db .tabl where .. -> delete from tabl where ..
//static void getTbName(char *sql){
// char *ch = sql;
//
// bool inBackQuote = false;
// int8_t dotIndex = 0;
// while(*ch != '\0'){
// if(!inBackQuote && *ch == '`'){
// inBackQuote = true;
// ch++;
// continue;
// }
//
// if(inBackQuote && *ch == '`'){
// inBackQuote = false;
// ch++;
//
// continue;
// }
//
// if(!inBackQuote && *ch == '.'){
// dotIndex ++;
// if(dotIndex == 2){
// memmove(sql, ch + 1, strlen(ch + 1) + 1);
// break;
// }
// }
// ch++;
// }
//}
static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
SDeleteRes req = {0};
SDecoder coder = {0};
int32_t code = TSDB_CODE_SUCCESS;
// decode and process req
void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
int32_t len = metaLen - sizeof(SMsgHead);
tDecoderInit(&coder, data, len);
if (tDecodeDeleteRes(&coder, &req) < 0) {
code = TSDB_CODE_INVALID_PARA;
goto end;
}
// getTbName(req.tableFName);
char sql[256] = {0};
sprintf(sql, "delete from `%s` where `%s` >= %" PRId64" and `%s` <= %" PRId64, req.tableFName, req.tsColName, req.skey, req.tsColName, req.ekey);
printf("delete sql:%s\n", sql);
TAOS_RES* res = taos_query(taos, sql);
SRequestObj *pRequest = (SRequestObj *)res;
code = pRequest->code;
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
code = TSDB_CODE_SUCCESS;
}
taos_free_result(res);
end:
tDecoderClear(&coder);
return code;
}
static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
SVAlterTbReq req = {0};
SDecoder coder = {0};
@ -2914,15 +2972,21 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
goto end;
}
launchQueryImpl(pRequest, pQuery, false, NULL);
pQuery = NULL; // no need to free in the end
launchQueryImpl(pRequest, pQuery, true, NULL);
pVgData = NULL;
pArray = NULL;
code = pRequest->code;
if (code == TSDB_CODE_VND_TABLE_NOT_EXIST) {
code = 0;
code = TSDB_CODE_SUCCESS;
}
if(pRequest->code == TSDB_CODE_SUCCESS){
SExecResult* pRes = &pRequest->body.resInfo.execRes;
if(pRes->res != NULL){
code = handleAlterTbExecRes(pRes->res, pCatalog);
}
}
end:
taosArrayDestroy(pArray);
if (pVgData) taosMemoryFreeClear(pVgData->pData);
@ -2933,27 +2997,6 @@ end:
return code;
}
int32_t taos_write_raw_meta(TAOS *taos, tmq_raw_data raw_meta){
if (!taos) {
return TSDB_CODE_INVALID_PARA;
}
if(raw_meta.raw_meta_type == TDMT_VND_CREATE_STB) {
return taosCreateStb(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
}else if(raw_meta.raw_meta_type == TDMT_VND_ALTER_STB){
return taosCreateStb(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
}else if(raw_meta.raw_meta_type == TDMT_VND_DROP_STB){
return taosDropStb(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
}else if(raw_meta.raw_meta_type == TDMT_VND_CREATE_TABLE){
return taosCreateTable(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
}else if(raw_meta.raw_meta_type == TDMT_VND_ALTER_TABLE){
return taosAlterTable(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
}else if(raw_meta.raw_meta_type == TDMT_VND_DROP_TABLE){
return taosDropTable(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
}
return TSDB_CODE_INVALID_PARA;
}
typedef struct{
SVgroupInfo vg;
void *data;
@ -2964,15 +3007,196 @@ static void destroyVgHash(void* data) {
taosMemoryFreeClear(vgData->data);
}
int32_t taos_write_raw_data(TAOS *taos, TAOS_RES *msg){
if (!TD_RES_TMQ(msg)) {
uError("WriteRaw:msg is not tmq : %d", *(int8_t*)msg);
return TSDB_CODE_TMQ_INVALID_MSG;
int taos_write_raw_block(TAOS *taos, int rows, char *pData, const char* tbname){
int32_t code = TSDB_CODE_SUCCESS;
STableMeta* pTableMeta = NULL;
SQuery *pQuery = NULL;
SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
if(!pRequest){
uError("WriteRaw:createRequest error request is null");
code = terrno;
goto end;
}
if (!pRequest->pDb) {
uError("WriteRaw:not use db");
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
}
SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
strcpy(pName.dbname, pRequest->pDb);
strcpy(pName.tname, tbname);
struct SCatalog *pCatalog = NULL;
code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
if(code != TSDB_CODE_SUCCESS){
uError("WriteRaw: get gatlog error");
goto end;
}
SRequestConnInfo conn = {0};
conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
conn.requestId = pRequest->requestId;
conn.requestObjRefId = pRequest->self;
conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
SVgroupInfo vgData = {0};
code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgData);
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbname);
goto end;
}
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
if (code != TSDB_CODE_SUCCESS) {
uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
goto end;
}
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
uint64_t uid = pTableMeta->uid;
int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
uint16_t fLen = 0;
int32_t rowSize = 0;
int16_t nVar = 0;
for (int i = 0; i < numOfCols; i++) {
SSchema *schema = pTableMeta->schema + i;
fLen += TYPE_BYTES[schema->type];
rowSize += schema->bytes;
if(IS_VAR_DATA_TYPE(schema->type)){
nVar ++;
}
}
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
(int32_t)TD_BITMAP_BYTES(numOfCols - 1);
int32_t schemaLen = 0;
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
int32_t totalLen = sizeof(SSubmitReq) + submitLen;
SSubmitReq* subReq = taosMemoryCalloc(1, totalLen);
SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
SRowBuilder rb = {0};
tdSRowInit(&rb, pTableMeta->sversion);
tdSRowSetTpInfo(&rb, numOfCols, fLen);
int32_t dataLen = 0;
char* pStart = pData + sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
int32_t* colLength = (int32_t*)pStart;
pStart += sizeof(int32_t) * numOfCols;
SResultColumn *pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn));
for (int32_t i = 0; i < numOfCols; ++i) {
if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) {
pCol[i].offset = (int32_t*)pStart;
pStart += rows * sizeof(int32_t);
} else {
pCol[i].nullbitmap = pStart;
pStart += BitmapLen(rows);
}
pCol[i].pData = pStart;
pStart += colLength[i];
}
for (int32_t j = 0; j < rows; j++) {
tdSRowResetBuf(&rb, rowData);
int32_t offset = 0;
for (int32_t k = 0; k < numOfCols; k++) {
const SSchema* pColumn = &pTableMeta->schema[k];
if (IS_VAR_DATA_TYPE(pColumn->type)) {
if (pCol[k].offset[j] != -1) {
char* data = pCol[k].pData + pCol[k].offset[j];
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
} else {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
}
} else {
if (!colDataIsNull_f(pCol[k].nullbitmap, j)) {
char* data = pCol[k].pData + pColumn->bytes * j;
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
} else {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
}
}
offset += TYPE_BYTES[pColumn->type];
}
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
dataLen += rowLen;
}
taosMemoryFree(pCol);
blk->uid = htobe64(uid);
blk->suid = htobe64(suid);
blk->padding = htonl(blk->padding);
blk->sversion = htonl(pTableMeta->sversion);
blk->schemaLen = htonl(schemaLen);
blk->numOfRows = htons(rows);
blk->dataLen = htonl(dataLen);
subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
subReq->numOfBlocks = 1;
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
if (NULL == pQuery) {
uError("create SQuery error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
pQuery->haveResultSet = false;
pQuery->msgType = TDMT_VND_SUBMIT;
pQuery->pRoot = (SNode *)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
if (NULL == pQuery->pRoot) {
uError("create pQuery->pRoot error");
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
SVnodeModifOpStmt *nodeStmt = (SVnodeModifOpStmt *)(pQuery->pRoot);
nodeStmt->payloadType = PAYLOAD_TYPE_KV;
nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
SVgDataBlocks *dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
if (NULL == dst) {
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto end;
}
dst->vg = vgData;
dst->numOfTables = subReq->numOfBlocks;
dst->size = subReq->length;
dst->pData = (char*)subReq;
subReq->header.vgId = htonl(dst->vg.vgId);
subReq->version = htonl(1);
subReq->header.contLen = htonl(subReq->length);
subReq->length = htonl(subReq->length);
subReq->numOfBlocks = htonl(subReq->numOfBlocks);
subReq = NULL; // no need free
taosArrayPush(nodeStmt->pDataBlocks, &dst);
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
end:
taosMemoryFreeClear(pTableMeta);
qDestroyQuery(pQuery);
return code;
}
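The `taos_write_raw_block` entry point added above takes a fetched result block verbatim and submits it to a named table in the connection's current database. A minimal sketch of the intended client-side pairing with `taos_fetch_raw_block`; the `src`/`dst` connections and the table names `src_tb`/`dst_tb` are placeholders, not part of this commit:

```c
/* Sketch only: copy one fetched block into another table via the new
 * taos_write_raw_block(). Connection setup and the table names are
 * placeholders. */
#include <stdio.h>
#include "taos.h"

static int copy_one_block(TAOS *src, TAOS *dst) {
  TAOS_RES *res = taos_query(src, "SELECT * FROM src_tb");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    return -1;
  }

  int   numOfRows = 0;
  void *pData = NULL;
  /* The raw block fetched here is assumed to use the same layout that
   * taos_write_raw_block() parses above (numOfCols, column lengths,
   * per-column data). */
  int code = taos_fetch_raw_block(res, &numOfRows, &pData);
  if (code == 0 && numOfRows > 0) {
    /* dst must have a current database selected, otherwise the call fails
     * with TSDB_CODE_PAR_DB_NOT_SPECIFIED as in the check above. */
    code = taos_write_raw_block(dst, numOfRows, (char *)pData, "dst_tb");
  }

  taos_free_result(res);
  return code;
}
```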
static int32_t tmqWriteRaw(TAOS *taos, void* data, int32_t dataLen){
int32_t code = TSDB_CODE_SUCCESS;
SHashObj *pVgHash = NULL;
SQuery *pQuery = NULL;
SMqRspObj rspObj = {0};
SDecoder decoder = {0};
terrno = TSDB_CODE_SUCCESS;
SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
@ -2981,6 +3205,17 @@ int32_t taos_write_raw_data(TAOS *taos, TAOS_RES *msg){
return terrno;
}
rspObj.resIter = -1;
rspObj.resType = RES_TYPE__TMQ;
tDecoderInit(&decoder, data, dataLen);
code = tDecodeSMqDataRsp(&decoder, &rspObj.rsp);
if (code != 0){
uError("WriteRaw:decode smqDataRsp error");
code = TSDB_CODE_INVALID_MSG;
goto end;
}
if (!pRequest->pDb) {
uError("WriteRaw:not use db");
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
@ -3001,18 +3236,18 @@ int32_t taos_write_raw_data(TAOS *taos, TAOS_RES *msg){
conn.requestId = pRequest->requestId;
conn.requestObjRefId = pRequest->self;
conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
SMqRspObj *rspObj = ((SMqRspObj*)msg);
printf("raw data block num:%d\n", rspObj->rsp.blockNum);
while (++rspObj->resIter < rspObj->rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj->rsp.blockData, rspObj->resIter);
if (!rspObj->rsp.withSchema) {
uError("WriteRaw:no schema, iter:%d", rspObj->resIter);
printf("raw data block num:%d\n", rspObj.rsp.blockNum);
while (++rspObj.resIter < rspObj.rsp.blockNum) {
SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
if (!rspObj.rsp.withSchema) {
uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
goto end;
}
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj->rsp.blockSchema, rspObj->resIter);
setResSchemaInfo(&rspObj->resInfo, pSW->pSchema, pSW->nCols);
SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
code = setQueryResultFromRsp(&rspObj->resInfo, pRetrieve, false, false);
code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
if(code != TSDB_CODE_SUCCESS){
uError("WriteRaw: setQueryResultFromRsp error");
goto end;
@ -3030,13 +3265,13 @@ int32_t taos_write_raw_data(TAOS *taos, TAOS_RES *msg){
}
}
int32_t rows = rspObj->resInfo.numOfRows;
int32_t rows = rspObj.resInfo.numOfRows;
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
(int32_t)TD_BITMAP_BYTES(pSW->nCols - 1);
int32_t schemaLen = 0;
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
const char* tbName = tmq_get_table_name(msg);
const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
if(!tbName){
uError("WriteRaw: tbname is null");
code = TSDB_CODE_TMQ_INVALID_MSG;
@ -3108,13 +3343,13 @@ int32_t taos_write_raw_data(TAOS *taos, TAOS_RES *msg){
for (int32_t j = 0; j < rows; j++) {
tdSRowResetBuf(&rb, rowData);
doSetOneRowPtr(&rspObj->resInfo);
rspObj->resInfo.current += 1;
doSetOneRowPtr(&rspObj.resInfo);
rspObj.resInfo.current += 1;
int32_t offset = 0;
for (int32_t k = 0; k < pSW->nCols; k++) {
const SSchema* pColumn = &pSW->pSchema[k];
char *data = rspObj->resInfo.row[k];
char *data = rspObj.resInfo.row[k];
if (!data) {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
} else {
@ -3186,13 +3421,105 @@ int32_t taos_write_raw_data(TAOS *taos, TAOS_RES *msg){
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
end:
tDecoderClear(&decoder);
taos_free_result(&rspObj);
qDestroyQuery(pQuery);
destroyRequest(pRequest);
taosHashCleanup(pVgHash);
return code;
}
char* tmq_get_json_meta(TAOS_RES* res) {
if (!TD_RES_TMQ_META(res)) {
return NULL;
}
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) {
return processCreateStb(&pMetaRspObj->metaRsp);
} else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_STB) {
return processAlterStb(&pMetaRspObj->metaRsp);
} else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_STB) {
return processDropSTable(&pMetaRspObj->metaRsp);
} else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_TABLE) {
return processCreateTable(&pMetaRspObj->metaRsp);
} else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_TABLE) {
return processAlterTable(&pMetaRspObj->metaRsp);
} else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_TABLE) {
return processDropTable(&pMetaRspObj->metaRsp);
}
return NULL;
}
void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); }
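For consumers that only need to observe meta events rather than replay them, `tmq_get_json_meta` above pairs naturally with `tmq_get_res_type`. A brief hedged sketch; consumer construction, subscription, and polling are assumed to happen elsewhere:

```c
/* Sketch only: print the JSON form of a meta message polled from TMQ. */
#include <stdio.h>
#include "taos.h"

static void dump_meta(TAOS_RES *msg) {
  if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
    char *json = tmq_get_json_meta(msg); /* NULL for non-meta messages */
    if (json != NULL) {
      printf("meta event: %s\n", json);
      tmq_free_json_meta(json);
    }
  }
}
```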
int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data *raw) {
if (!raw || !res){
return TSDB_CODE_INVALID_PARA;
}
if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
raw->raw = pMetaRspObj->metaRsp.metaRsp;
raw->raw_len = pMetaRspObj->metaRsp.metaRspLen;
raw->raw_type = pMetaRspObj->metaRsp.resMsgType;
} else if(TD_RES_TMQ(res)){
SMqRspObj *rspObj = ((SMqRspObj*)res);
int32_t len = 0;
int32_t code = 0;
tEncodeSize(tEncodeSMqDataRsp, &rspObj->rsp, len, code);
if (code < 0) {
return -1;
}
void *buf = taosMemoryCalloc(1, len);
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, len);
tEncodeSMqDataRsp(&encoder, &rspObj->rsp);
tEncoderClear(&encoder);
raw->raw = buf;
raw->raw_len = len;
raw->raw_type = RES_TYPE__TMQ;
} else {
return TSDB_CODE_TMQ_INVALID_MSG;
}
return TSDB_CODE_SUCCESS;
}
void tmq_free_raw(tmq_raw_data raw) {
if (raw.raw_type == RES_TYPE__TMQ){
taosMemoryFree(raw.raw);
}
}
int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw){
if (!taos) {
return TSDB_CODE_INVALID_PARA;
}
if(raw.raw_type == TDMT_VND_CREATE_STB) {
return taosCreateStb(taos, raw.raw, raw.raw_len);
}else if(raw.raw_type == TDMT_VND_ALTER_STB){
return taosCreateStb(taos, raw.raw, raw.raw_len);
}else if(raw.raw_type == TDMT_VND_DROP_STB){
return taosDropStb(taos, raw.raw, raw.raw_len);
}else if(raw.raw_type == TDMT_VND_CREATE_TABLE){
return taosCreateTable(taos, raw.raw, raw.raw_len);
}else if(raw.raw_type == TDMT_VND_ALTER_TABLE){
return taosAlterTable(taos, raw.raw, raw.raw_len);
}else if(raw.raw_type == TDMT_VND_DROP_TABLE) {
return taosDropTable(taos, raw.raw, raw.raw_len);
}else if(raw.raw_type == TDMT_VND_DELETE){
return taosDeleteData(taos, raw.raw, raw.raw_len);
}else if(raw.raw_type == RES_TYPE__TMQ){
return tmqWriteRaw(taos, raw.raw, raw.raw_len);
}
return TSDB_CODE_INVALID_PARA;
}
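Taken together, `tmq_get_raw`, `tmq_write_raw` and `tmq_free_raw` above form a single replay path for meta events, data blocks and deletes. A hedged sketch of a consumer loop that forwards everything it polls to a second connection; building the `tmq` consumer and the destination `dst` connection is assumed to happen elsewhere:

```c
/* Sketch only: replay polled TMQ messages against a second connection
 * using the raw API introduced in this change. */
#include <stdint.h>
#include <stdio.h>
#include "taos.h"

static void replay_loop(tmq_t *tmq, TAOS *dst) {
  while (1) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 1000 /* ms */);
    if (msg == NULL) continue;

    tmq_raw_data raw = {0};
    if (tmq_get_raw(msg, &raw) == 0) {
      /* raw.raw_type carries either a TDMT_VND_* meta type or RES_TYPE__TMQ
       * for plain data; tmq_write_raw() dispatches on it as shown above. */
      int32_t code = tmq_write_raw(dst, raw);
      if (code != 0) {
        fprintf(stderr, "tmq_write_raw failed, code: %d\n", code);
      }
      tmq_free_raw(raw);
    }
    taos_free_result(msg);
  }
}
```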
void tmq_commit_async(tmq_t* tmq, const TAOS_RES* msg, tmq_commit_cb* cb, void* param) {
//
tmqCommitInner2(tmq, msg, 0, 1, cb, param);

View File

@ -89,8 +89,8 @@ static const SSysDbTableSchema userDBSchema[] = {
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "retention", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
{.name = "cache_model", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "cache_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "wal_level", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT},

View File

@ -5681,6 +5681,8 @@ int32_t tEncodeDeleteRes(SEncoder *pCoder, const SDeleteRes *pRes) {
if (tEncodeI64(pCoder, pRes->ekey) < 0) return -1;
if (tEncodeI64v(pCoder, pRes->affectedRows) < 0) return -1;
if (tEncodeCStr(pCoder, pRes->tableFName) < 0) return -1;
if (tEncodeCStr(pCoder, pRes->tsColName) < 0) return -1;
return 0;
}
@ -5692,12 +5694,14 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
if (tDecodeI32v(pCoder, &nUid) < 0) return -1;
for (int32_t iUid = 0; iUid < nUid; iUid++) {
if (tDecodeU64(pCoder, &uid) < 0) return -1;
taosArrayPush(pRes->uidList, &uid);
if (pRes->uidList) taosArrayPush(pRes->uidList, &uid);
}
if (tDecodeI64(pCoder, &pRes->skey) < 0) return -1;
if (tDecodeI64(pCoder, &pRes->ekey) < 0) return -1;
if (tDecodeI64v(pCoder, &pRes->affectedRows) < 0) return -1;
if (tDecodeCStrTo(pCoder, pRes->tableFName) < 0) return -1;
if (tDecodeCStrTo(pCoder, pRes->tsColName) < 0) return -1;
return 0;
}
int32_t tEncodeSMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {

View File

@ -184,6 +184,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_ALTER_STB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_STB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TABLE_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_BATCH_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TABLE_CFG, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_SMA, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_SMA, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -343,6 +343,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_UPDATE_TAG_VAL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TABLE_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TABLE_CFG, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_BATCH_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;

View File

@ -89,7 +89,7 @@ static void vmProcessStreamQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
if (code != 0) {
if (terrno != 0) code = terrno;
dGError("vgId:%d, msg:%p failed to stream since %s", pVnode->vgId, pMsg, terrstr());
dGError("vgId:%d, msg:%p failed to process stream since %s", pVnode->vgId, pMsg, terrstr());
vmSendRsp(pMsg, code);
}

View File

@ -1,4 +1,11 @@
aux_source_directory(src MNODE_SRC)
IF (TD_PRIVILEGE)
ADD_DEFINITIONS(-D_PRIVILEGE)
ENDIF ()
IF (TD_PRIVILEGE)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/privilege/src/privilege.c)
ENDIF ()
add_library(mnode STATIC ${MNODE_SRC})
target_include_directories(
mnode
@ -8,11 +15,8 @@ target_include_directories(
target_link_libraries(
mnode scheduler sdb wal transport cjson sync monitor executor qworker stream parser
)
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(mnode grant)
ENDIF ()
IF (TD_GRANT)
ADD_DEFINITIONS(-D_GRANT)
ENDIF ()

View File

@ -30,6 +30,7 @@ int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType
int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *dbname);
int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, EShowType showType, const char *dbname);
int32_t mndCheckAlterUserPrivilege(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter);
int32_t mndSetUserAuthRsp(SMnode *pMnode, SUserObj *pUser, SGetUserAuthRsp *pRsp);
#ifdef __cplusplus
}

View File

@ -17,6 +17,7 @@
#define _TD_MND_USER_H_
#include "mndInt.h"
#include "thash.h"
#ifdef __cplusplus
extern "C" {
@ -28,9 +29,10 @@ SUserObj *mndAcquireUser(SMnode *pMnode, const char *userName);
void mndReleaseUser(SMnode *pMnode, SUserObj *pUser);
// for trans test
SSdbRaw *mndUserActionEncode(SUserObj *pUser);
int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp,
int32_t *pRspLen);
SSdbRaw *mndUserActionEncode(SUserObj *pUser);
SHashObj *mndDupDbHash(SHashObj *pOld);
int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp,
int32_t *pRspLen);
#ifdef __cplusplus
}

View File

@ -18,177 +18,20 @@
#include "mndDb.h"
#include "mndUser.h"
#ifndef _PRIVILEGE
int32_t mndInitPrivilege(SMnode *pMnode) { return 0; }
void mndCleanupPrivilege(SMnode *pMnode) {}
int32_t mndCheckOperPrivilege(SMnode *pMnode, const char *user, EOperType operType) {
int32_t code = 0;
SUserObj *pUser = mndAcquireUser(pMnode, user);
if (pUser == NULL) {
terrno = TSDB_CODE_MND_NO_USER_FROM_CONN;
code = -1;
goto _OVER;
}
if (pUser->superUser) {
goto _OVER;
}
if (!pUser->enable) {
terrno = TSDB_CODE_MND_USER_DISABLED;
code = -1;
goto _OVER;
}
switch (operType) {
case MND_OPER_CONNECT:
case MND_OPER_CREATE_FUNC:
case MND_OPER_DROP_FUNC:
case MND_OPER_SHOW_VARIBALES:
break;
default:
terrno = TSDB_CODE_MND_NO_RIGHTS;
code = -1;
}
_OVER:
mndReleaseUser(pMnode, pUser);
return code;
}
int32_t mndCheckAlterUserPrivilege(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter) {
if (pUser->superUser && pAlter->alterType != TSDB_ALTER_USER_PASSWD) {
terrno = TSDB_CODE_MND_NO_RIGHTS;
return -1;
}
if (pOperUser->superUser) return 0;
if (!pOperUser->enable) {
terrno = TSDB_CODE_MND_USER_DISABLED;
return -1;
}
if (pAlter->alterType == TSDB_ALTER_USER_PASSWD) {
if (strcmp(pUser->user, pOperUser->user) == 0) {
if (pOperUser->sysInfo) return 0;
}
}
terrno = TSDB_CODE_MND_NO_RIGHTS;
return -1;
}
int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, EShowType showType, const char *dbname) {
int32_t code = 0;
SUserObj *pUser = mndAcquireUser(pMnode, user);
if (pUser == NULL) {
code = -1;
goto _OVER;
}
if (pUser->superUser) {
goto _OVER;
}
if (!pUser->enable) {
terrno = TSDB_CODE_MND_USER_DISABLED;
code = -1;
goto _OVER;
}
if (pUser->sysInfo) {
goto _OVER;
}
switch (showType) {
case TSDB_MGMT_TABLE_DB:
case TSDB_MGMT_TABLE_STB:
case TSDB_MGMT_TABLE_INDEX:
case TSDB_MGMT_TABLE_STREAMS:
case TSDB_MGMT_TABLE_CONSUMERS:
case TSDB_MGMT_TABLE_TOPICS:
case TSDB_MGMT_TABLE_SUBSCRIPTIONS:
case TSDB_MGMT_TABLE_FUNC:
case TSDB_MGMT_TABLE_QUERIES:
case TSDB_MGMT_TABLE_CONNS:
case TSDB_MGMT_TABLE_APPS:
case TSDB_MGMT_TABLE_TRANS:
code = 0;
break;
default:
terrno = TSDB_CODE_MND_NO_RIGHTS;
code = -1;
goto _OVER;
}
if (showType == TSDB_MGMT_TABLE_STB || showType == TSDB_MGMT_TABLE_VGROUP || showType == TSDB_MGMT_TABLE_INDEX) {
code = mndCheckDbPrivilegeByName(pMnode, user, MND_OPER_READ_OR_WRITE_DB, dbname);
}
_OVER:
mndReleaseUser(pMnode, pUser);
return code;
}
int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType, SDbObj *pDb) {
int32_t code = 0;
SUserObj *pUser = mndAcquireUser(pMnode, user);
if (pUser == NULL) {
code = -1;
goto _OVER;
}
if (pUser->superUser) goto _OVER;
if (!pUser->enable) {
terrno = TSDB_CODE_MND_USER_DISABLED;
code = -1;
goto _OVER;
}
if (operType == MND_OPER_CREATE_DB) {
if (pUser->sysInfo) goto _OVER;
}
if (operType == MND_OPER_ALTER_DB || operType == MND_OPER_DROP_DB || operType == MND_OPER_COMPACT_DB ||
operType == MND_OPER_TRIM_DB) {
if (strcmp(pUser->user, pDb->createUser) == 0 && pUser->sysInfo) goto _OVER;
}
if (operType == MND_OPER_USE_DB || operType == MND_OPER_READ_OR_WRITE_DB) {
if (strcmp(pUser->user, pDb->createUser) == 0) goto _OVER;
if (taosHashGet(pUser->readDbs, pDb->name, strlen(pDb->name) + 1) != NULL) goto _OVER;
if (taosHashGet(pUser->writeDbs, pDb->name, strlen(pDb->name) + 1) != NULL) goto _OVER;
}
if (operType == MND_OPER_WRITE_DB) {
if (strcmp(pUser->user, pDb->createUser) == 0) goto _OVER;
if (taosHashGet(pUser->writeDbs, pDb->name, strlen(pDb->name) + 1) != NULL) goto _OVER;
}
if (operType == MND_OPER_READ_DB) {
if (strcmp(pUser->user, pDb->createUser) == 0) goto _OVER;
if (taosHashGet(pUser->readDbs, pDb->name, strlen(pDb->name) + 1) != NULL) goto _OVER;
}
terrno = TSDB_CODE_MND_NO_RIGHTS;
code = -1;
_OVER:
mndReleaseUser(pMnode, pUser);
return code;
}
void mndCleanupPrivilege(SMnode *pMnode) {}
int32_t mndCheckOperPrivilege(SMnode *pMnode, const char *user, EOperType operType) { return 0; }
int32_t mndCheckAlterUserPrivilege(SUserObj *pOperUser, SUserObj *pUser, SAlterUserReq *pAlter) { return 0; }
int32_t mndCheckShowPrivilege(SMnode *pMnode, const char *user, EShowType showType, const char *dbname) { return 0; }
int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType, SDbObj *pDb) { return 0; }
int32_t mndCheckDbPrivilegeByName(SMnode *pMnode, const char *user, EOperType operType, const char *dbname) {
SDbObj *pDb = mndAcquireDb(pMnode, dbname);
if (pDb == NULL) return -1;
int32_t code = mndCheckDbPrivilege(pMnode, user, operType, pDb);
mndReleaseDb(pMnode, pDb);
return code;
}
return 0;
}
int32_t mndSetUserAuthRsp(SMnode *pMnode, SUserObj *pUser, SGetUserAuthRsp *pRsp) {
memcpy(pRsp->user, pUser->user, TSDB_USER_LEN);
pRsp->superAuth = 1;
pRsp->version = pUser->authVersion;
return 0;
}
#endif

View File

@ -63,6 +63,106 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) {
return code;
}
int32_t mndProcessBatchMetaMsg(SRpcMsg *pMsg) {
int32_t code = 0;
int32_t offset = 0;
int32_t rspSize = 0;
SBatchReq *batchReq = (SBatchReq*)pMsg->pCont;
int32_t msgNum = ntohl(batchReq->msgNum);
offset += sizeof(SBatchReq);
SBatchMsg req = {0};
SBatchRsp rsp = {0};
SRpcMsg reqMsg = *pMsg;
SRpcMsg rspMsg = {0};
void* pRsp = NULL;
SMnode *pMnode = pMsg->info.node;
SArray* batchRsp = taosArrayInit(msgNum, sizeof(SBatchRsp));
if (NULL == batchRsp) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
for (int32_t i = 0; i < msgNum; ++i) {
req.msgType = ntohl(*(int32_t*)((char*)pMsg->pCont + offset));
offset += sizeof(req.msgType);
req.msgLen = ntohl(*(int32_t*)((char*)pMsg->pCont + offset));
offset += sizeof(req.msgLen);
req.msg = (char*)pMsg->pCont + offset;
offset += req.msgLen;
reqMsg.msgType = req.msgType;
reqMsg.pCont = req.msg;
reqMsg.contLen = req.msgLen;
reqMsg.info.rsp = NULL;
reqMsg.info.rspLen = 0;
MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(req.msgType)];
if (fp == NULL) {
mError("msg:%p, failed to get msg handle, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
terrno = TSDB_CODE_MSG_NOT_PROCESSED;
return -1;
}
if ((*fp)(&reqMsg)) {
rsp.rspCode = terrno;
} else {
rsp.rspCode = 0;
}
rsp.reqType = reqMsg.msgType;
rsp.msgLen = reqMsg.info.rspLen;
rsp.msg = reqMsg.info.rsp;
taosArrayPush(batchRsp, &rsp);
rspSize += sizeof(rsp) + rsp.msgLen - POINTER_BYTES;
}
rspSize += sizeof(int32_t);
offset = 0;
pRsp = rpcMallocCont(rspSize);
if (pRsp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
*(int32_t*)((char*)pRsp + offset) = htonl(msgNum);
offset += sizeof(msgNum);
for (int32_t i = 0; i < msgNum; ++i) {
SBatchRsp *p = taosArrayGet(batchRsp, i);
*(int32_t*)((char*)pRsp + offset) = htonl(p->reqType);
offset += sizeof(p->reqType);
*(int32_t*)((char*)pRsp + offset) = htonl(p->msgLen);
offset += sizeof(p->msgLen);
*(int32_t*)((char*)pRsp + offset) = htonl(p->rspCode);
offset += sizeof(p->rspCode);
memcpy((char*)pRsp + offset, p->msg, p->msgLen);
offset += p->msgLen;
rpcFreeCont(p->msg);
}
taosArrayDestroy(batchRsp);
batchRsp = NULL;
_exit:
pMsg->info.rsp = pRsp;
pMsg->info.rspLen = rspSize;
if (code) {
mError("mnd get batch meta failed cause of %s", tstrerror(code));
}
taosArrayDestroyEx(batchRsp, tFreeSBatchRsp);
return code;
}
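The handler above, like its vnode counterpart further down, walks a flat, hand-packed request: an `SBatchReq` header carrying the message count, followed by `[msgType, msgLen, payload]` records, and it answers with `[reqType, msgLen, rspCode, payload]` records prefixed by the count. A standalone illustration of that request layout follows; this is not the encoder the catalog module actually uses, and the header shape beyond the count field is an assumption of the sketch:

```c
/* Illustration only: pack n sub-requests into the flat batch layout that
 * mndProcessBatchMetaMsg() / vnodeGetBatchMeta() walk with an offset
 * cursor. All integers travel in network byte order, as in the code above. */
#include <arpa/inet.h> /* htonl */
#include <stdint.h>
#include <string.h>

typedef struct {
  int32_t     msgType;
  int32_t     msgLen;
  const void *msg;
} batch_item;

/* Write a 32-bit value in network byte order at an arbitrary offset. */
static void put_i32(char *dst, int32_t v) {
  uint32_t be = htonl((uint32_t)v);
  memcpy(dst, &be, sizeof(be));
}

/* headerSize stands in for sizeof(SBatchReq); placing the count in the
 * first four bytes of the header is an assumption of this sketch. */
static size_t pack_batch(char *buf, size_t headerSize,
                         const batch_item *items, int32_t n) {
  size_t offset = 0;
  memset(buf, 0, headerSize);
  put_i32(buf, n);
  offset += headerSize;

  for (int32_t i = 0; i < n; ++i) {
    put_i32(buf + offset, items[i].msgType);
    offset += sizeof(int32_t);
    put_i32(buf + offset, items[i].msgLen);
    offset += sizeof(int32_t);
    memcpy(buf + offset, items[i].msg, (size_t)items[i].msgLen);
    offset += (size_t)items[i].msgLen;
  }
  return offset; /* total bytes to send */
}
```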
int32_t mndInitQuery(SMnode *pMnode) {
if (qWorkerInit(NODE_TYPE_MNODE, MNODE_HANDLE, NULL, (void **)&pMnode->pQuery, &pMnode->msgCb) != 0) {
mError("failed to init qworker in mnode since %s", terrstr());
@ -76,6 +176,7 @@ int32_t mndInitQuery(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_SCH_MERGE_FETCH, mndProcessQueryMsg);
mndSetMsgHandle(pMnode, TDMT_SCH_DROP_TASK, mndProcessQueryMsg);
mndSetMsgHandle(pMnode, TDMT_SCH_QUERY_HEARTBEAT, mndProcessQueryMsg);
mndSetMsgHandle(pMnode, TDMT_MND_BATCH_META, mndProcessBatchMetaMsg);
return 0;
}

View File

@ -114,18 +114,26 @@ int32_t mndAddSinkToTask(SMnode* pMnode, SStreamObj* pStream, SStreamTask* pTask
int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStreamTask* pTask) {
pTask->sinkType = TASK_SINK__NONE;
bool isShuffle = false;
if (pStream->fixedSinkVgId == 0) {
pTask->dispatchType = TASK_DISPATCH__SHUFFLE;
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
SDbObj* pDb = mndAcquireDb(pMnode, pStream->targetDb);
ASSERT(pDb);
if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) {
ASSERT(0);
return -1;
if (pDb->cfg.numOfVgroups > 1) {
isShuffle = true;
pTask->dispatchType = TASK_DISPATCH__SHUFFLE;
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) {
ASSERT(0);
return -1;
}
}
sdbRelease(pMnode->pSdb, pDb);
sdbRelease(pMnode->pSdb, pDb);
}
if (isShuffle) {
memcpy(pTask->shuffleDispatcher.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
SArray* pVgs = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
int32_t sz = taosArrayGetSize(pVgs);
@ -383,10 +391,12 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
// exec
pInnerTask->execType = TASK_EXEC__PIPE;
#if 0
SDbObj* pSourceDb = mndAcquireDb(pMnode, pStream->sourceDb);
ASSERT(pDbObj != NULL);
sdbRelease(pSdb, pSourceDb);
pInnerTask->numOfVgroups = pSourceDb->cfg.numOfVgroups;
#endif
if (tsSchedStreamToSnode) {
SSnodeObj* pSnode = mndSchedFetchOneSnode(pMnode);

View File

@ -15,8 +15,8 @@
#define _DEFAULT_SOURCE
#include "mndUser.h"
#include "mndPrivilege.h"
#include "mndDb.h"
#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndTrans.h"
#include "tbase64.h"
@ -408,7 +408,7 @@ static int32_t mndAlterUser(SMnode *pMnode, SUserObj *pOld, SUserObj *pNew, SRpc
return 0;
}
static SHashObj *mndDupDbHash(SHashObj *pOld) {
SHashObj *mndDupDbHash(SHashObj *pOld) {
SHashObj *pNew =
taosHashInit(taosHashGetSize(pOld), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
if (pNew == NULL) {
@ -662,38 +662,6 @@ _OVER:
return code;
}
static int32_t mndSetUserAuthRsp(SMnode *pMnode, SUserObj *pUser, SGetUserAuthRsp *pRsp) {
memcpy(pRsp->user, pUser->user, TSDB_USER_LEN);
pRsp->superAuth = pUser->superUser;
pRsp->version = pUser->authVersion;
taosRLockLatch(&pUser->lock);
pRsp->readDbs = mndDupDbHash(pUser->readDbs);
pRsp->writeDbs = mndDupDbHash(pUser->writeDbs);
taosRUnLockLatch(&pUser->lock);
pRsp->createdDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pRsp->createdDbs) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (1) {
SDbObj *pDb = NULL;
pIter = sdbFetch(pSdb, SDB_DB, pIter, (void **)&pDb);
if (pIter == NULL) break;
if (strcmp(pDb->createUser, pUser->user) == 0) {
int32_t len = strlen(pDb->name) + 1;
taosHashPut(pRsp->createdDbs, pDb->name, len, pDb->name, len);
}
sdbRelease(pSdb, pDb);
}
return 0;
}
static int32_t mndProcessGetUserAuthReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;

View File

@ -78,8 +78,9 @@ void vnodeBufPoolReset(SVBufPool* pPool);
// vnodeQuery.c
int32_t vnodeQueryOpen(SVnode* pVnode);
void vnodeQueryClose(SVnode* pVnode);
int32_t vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg);
int vnodeGetTableCfg(SVnode* pVnode, SRpcMsg* pMsg);
int32_t vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg, bool direct);
int vnodeGetTableCfg(SVnode* pVnode, SRpcMsg* pMsg, bool direct);
int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg);
// vnodeCommit.c
int32_t vnodeBegin(SVnode* pVnode);

View File

@ -611,8 +611,8 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
goto _err;
}
smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " version:%"PRIi64, SMA_VID(pSma),
suid, pItem->level, output->info.version);
smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " version:%" PRIi64,
SMA_VID(pSma), suid, pItem->level, output->info.version);
taosMemoryFreeClear(pReq);
taosArrayClear(pResult);
@ -644,7 +644,7 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType
smaDebug("vgId:%d, execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level,
pItem->taskInfo, suid);
if (qSetStreamInput(pItem->taskInfo, pMsg, inputType, true) < 0) { // INPUT__DATA_SUBMIT
if (qSetMultiStreamInput(pItem->taskInfo, pMsg, 1, inputType) < 0) { // INPUT__DATA_SUBMIT
smaError("vgId:%d, rsma % " PRIi8 " qSetStreamInput failed since %s", SMA_VID(pSma), level, tstrerror(terrno));
return TSDB_CODE_FAILED;
}
@ -1329,7 +1329,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
tdRefRSmaInfo(pSma, pRSmaInfo);
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
qSetStreamInput(pItem->taskInfo, &dataBlock, STREAM_INPUT__DATA_BLOCK, false);
qSetMultiStreamInput(pItem->taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK);
tdRSmaFetchAndSubmitResult(pItem, pRSmaInfo->pTSchema, pRSmaInfo->suid, pStat, STREAM_INPUT__DATA_BLOCK);
tdUnRefRSmaInfo(pSma, pRSmaInfo);
@ -1356,4 +1356,4 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
_end:
tdReleaseSmaRef(smaMgmt.rsetId, pItem->refId, __func__, __LINE__);
}
}

View File

@ -146,8 +146,8 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
}
}
int32_t len;
int32_t code;
int32_t len = 0;
int32_t code = 0;
tEncodeSize(tEncodeSMqDataRsp, pRsp, len, code);
if (code < 0) {
return -1;
@ -164,9 +164,10 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
SEncoder encoder;
SEncoder encoder = {0};
tEncoderInit(&encoder, abuf, len);
tEncodeSMqDataRsp(&encoder, pRsp);
tEncoderClear(&encoder);
SRpcMsg rsp = {
.info = pMsg->info,
@ -176,8 +177,8 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
};
tmsgSendRsp(&rsp);
char buf1[80];
char buf2[80];
char buf1[80] = {0};
char buf2[80] = {0};
tFormatOffset(buf1, 80, &pRsp->reqOffset);
tFormatOffset(buf2, 80, &pRsp->rspOffset);
tqDebug("vgId:%d from consumer:%" PRId64 ", (epoch %d) send rsp, block num: %d, reqOffset:%s, rspOffset:%s",
@ -652,7 +653,7 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) {
} else {
SReadHandle mgHandle = {
.vnode = NULL,
.numOfVgroups = pTask->numOfVgroups,
.numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo),
};
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle);
}

View File

@ -431,6 +431,12 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
pSup->tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID;
code = tBlockDataInit(&pReader->status.fileBlockData);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
goto _end;
}
pReader->pResBlock = createResBlock(pCond, pReader->capacity);
if (pReader->pResBlock == NULL) {
code = terrno;
@ -1200,8 +1206,9 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
}
static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
STSRow* pTSRow, SIterInfo* pIter, int64_t key) {
SIterInfo* pIter, int64_t key) {
SRowMerger merge = {0};
STSRow* pTSRow = NULL;
SBlockData* pBlockData = &pReader->status.fileBlockData;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
@ -1250,6 +1257,8 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
tRowMergerClear(&merge);
doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
taosMemoryFree(pTSRow);
return TSDB_CODE_SUCCESS;
}
@ -1411,8 +1420,6 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
SBlockData* pBlockData = &pReader->status.fileBlockData;
STSRow* pTSRow = NULL;
int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
@ -1422,23 +1429,27 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
} else {
// imem + file
if (pBlockScanInfo->iiter.hasVal) {
return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, pTSRow, &pBlockScanInfo->iiter, key);
return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key);
}
// mem + file
if (pBlockScanInfo->iter.hasVal) {
return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, pTSRow, &pBlockScanInfo->iter, key);
return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key);
}
// imem & mem are all empty, only file exist
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
STSRow* pTSRow = NULL;
SRowMerger merge = {0};
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
return TSDB_CODE_SUCCESS;
}
}
@ -1716,7 +1727,8 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
TSDBKEY key = getCurrentKeyInBuf(pBlockIter, pReader);
if (fileBlockShouldLoad(pReader, pFBlock, pBlock, pScanInfo, key)) {
tBlockDataInit(&pStatus->fileBlockData);
tBlockDataReset(&pStatus->fileBlockData);
tBlockDataClearData(&pStatus->fileBlockData);
code = doLoadFileBlockData(pReader, pBlockIter, pScanInfo, &pStatus->fileBlockData);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -2160,6 +2172,8 @@ static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanIn
setFileBlockActiveInBlockIter(pBlockIter, neighborIndex, step);
// 3. load the neighbor block, and set it to be the currently accessed file data block
tBlockDataReset(&pStatus->fileBlockData);
tBlockDataClearData(&pStatus->fileBlockData);
int32_t code = doLoadFileBlockData(pReader, pBlockIter, pScanInfo, &pStatus->fileBlockData);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -2563,6 +2577,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
}
}
taosMemoryFree(pSupInfo->buildBuf);
tBlockDataClear(&pReader->status.fileBlockData, true);
cleanupDataBlockIterator(&pReader->status.blockIter);
@ -2760,13 +2775,9 @@ static SArray* doRetrieveDataBlock(STsdbReader* pReader) {
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pStatus->blockIter);
STableBlockScanInfo* pBlockScanInfo = taosHashGet(pStatus->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
int32_t code = tBlockDataInit(&pStatus->fileBlockData);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return NULL;
}
code = doLoadFileBlockData(pReader, &pStatus->blockIter, pBlockScanInfo, &pStatus->fileBlockData);
tBlockDataReset(&pStatus->fileBlockData);
tBlockDataClearData(&pStatus->fileBlockData);
int32_t code = doLoadFileBlockData(pReader, &pStatus->blockIter, pBlockScanInfo, &pStatus->fileBlockData);
if (code != TSDB_CODE_SUCCESS) {
tBlockDataClear(&pStatus->fileBlockData, 1);
@ -2775,7 +2786,6 @@ static SArray* doRetrieveDataBlock(STsdbReader* pReader) {
}
copyBlockDataToSDataBlock(pReader, pBlockScanInfo);
tBlockDataClear(&pStatus->fileBlockData, 1);
return pReader->pResBlock->pDataBlock;
}
@ -2872,9 +2882,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
while (true) {
if (hasNext) {
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
STableBlockScanInfo* pScanInfo = taosHashGet(pStatus->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
SBlock* pBlock = taosArrayGet(pScanInfo->pBlockList, pFBlock->tbBlockIdx);
SBlock* pBlock = getCurrentBlock(pBlockIter);
int32_t numOfRows = pBlock->nRow;
pTableBlockInfo->totalRows += numOfRows;
@ -2895,7 +2903,6 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
pTableBlockInfo->blockRowsHisto[bucketIndex]++;
hasNext = blockIteratorNext(&pStatus->blockIter);
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) {

View File

@ -1022,6 +1022,10 @@ void tBlockDataClear(SBlockData *pBlockData, int8_t deepClear) {
tFree((uint8_t *)pBlockData->aTSKEY);
taosArrayDestroy(pBlockData->aIdx);
taosArrayDestroyEx(pBlockData->aColData, deepClear ? tColDataClear : NULL);
pBlockData->aColData = NULL;
pBlockData->aIdx = NULL;
pBlockData->aTSKEY = NULL;
pBlockData->aVersion = NULL;
}
int32_t tBlockDataSetSchema(SBlockData *pBlockData, STSchema *pTSchema) {

View File

@ -21,7 +21,7 @@ int vnodeQueryOpen(SVnode *pVnode) {
void vnodeQueryClose(SVnode *pVnode) { qWorkerDestroy((void **)&pVnode->pQuery); }
int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg, bool direct) {
STableInfoReq infoReq = {0};
STableMetaRsp metaRsp = {0};
SMetaReader mer1 = {0};
@ -99,7 +99,12 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
goto _exit;
}
pRsp = rpcMallocCont(rspLen);
if (direct) {
pRsp = rpcMallocCont(rspLen);
} else {
pRsp = taosMemoryCalloc(1, rspLen);
}
if (pRsp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@ -117,15 +122,19 @@ _exit:
qError("get table %s meta failed cause of %s", infoReq.tbName, tstrerror(code));
}
tmsgSendRsp(&rpcMsg);
if (direct) {
tmsgSendRsp(&rpcMsg);
} else {
*pMsg = rpcMsg;
}
taosMemoryFree(metaRsp.pSchemas);
metaReaderClear(&mer2);
metaReaderClear(&mer1);
return TSDB_CODE_SUCCESS;
}
int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg) {
int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg, bool direct) {
STableCfgReq cfgReq = {0};
STableCfgRsp cfgRsp = {0};
SMetaReader mer1 = {0};
@ -209,7 +218,12 @@ int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg) {
goto _exit;
}
pRsp = rpcMallocCont(rspLen);
if (direct) {
pRsp = rpcMallocCont(rspLen);
} else {
pRsp = taosMemoryCalloc(1, rspLen);
}
if (pRsp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@ -227,14 +241,124 @@ _exit:
qError("get table %s cfg failed cause of %s", cfgReq.tbName, tstrerror(code));
}
tmsgSendRsp(&rpcMsg);
if (direct) {
tmsgSendRsp(&rpcMsg);
} else {
*pMsg = rpcMsg;
}
tFreeSTableCfgRsp(&cfgRsp);
metaReaderClear(&mer2);
metaReaderClear(&mer1);
return TSDB_CODE_SUCCESS;
}
int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0;
int32_t offset = 0;
int32_t rspSize = 0;
SBatchReq *batchReq = (SBatchReq*)pMsg->pCont;
int32_t msgNum = ntohl(batchReq->msgNum);
offset += sizeof(SBatchReq);
SBatchMsg req = {0};
SBatchRsp rsp = {0};
SRpcMsg reqMsg = *pMsg;
SRpcMsg rspMsg = {0};
void* pRsp = NULL;
SArray* batchRsp = taosArrayInit(msgNum, sizeof(SBatchRsp));
if (NULL == batchRsp) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
for (int32_t i = 0; i < msgNum; ++i) {
req.msgType = ntohl(*(int32_t*)((char*)pMsg->pCont + offset));
offset += sizeof(req.msgType);
req.msgLen = ntohl(*(int32_t*)((char*)pMsg->pCont + offset));
offset += sizeof(req.msgLen);
req.msg = (char*)pMsg->pCont + offset;
offset += req.msgLen;
reqMsg.msgType = req.msgType;
reqMsg.pCont = req.msg;
reqMsg.contLen = req.msgLen;
switch (req.msgType) {
case TDMT_VND_TABLE_META:
vnodeGetTableMeta(pVnode, &reqMsg, false);
break;
case TDMT_VND_TABLE_CFG:
vnodeGetTableCfg(pVnode, &reqMsg, false);
break;
default:
qError("invalid req msgType %d", req.msgType);
reqMsg.code = TSDB_CODE_INVALID_MSG;
reqMsg.pCont = NULL;
reqMsg.contLen = 0;
break;
}
rsp.reqType = reqMsg.msgType;
rsp.msgLen = reqMsg.contLen;
rsp.rspCode = reqMsg.code;
rsp.msg = reqMsg.pCont;
taosArrayPush(batchRsp, &rsp);
rspSize += sizeof(rsp) + rsp.msgLen - POINTER_BYTES;
}
rspSize += sizeof(int32_t);
offset = 0;
pRsp = rpcMallocCont(rspSize);
if (pRsp == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
*(int32_t*)((char*)pRsp + offset) = htonl(msgNum);
offset += sizeof(msgNum);
for (int32_t i = 0; i < msgNum; ++i) {
SBatchRsp *p = taosArrayGet(batchRsp, i);
*(int32_t*)((char*)pRsp + offset) = htonl(p->reqType);
offset += sizeof(p->reqType);
*(int32_t*)((char*)pRsp + offset) = htonl(p->msgLen);
offset += sizeof(p->msgLen);
*(int32_t*)((char*)pRsp + offset) = htonl(p->rspCode);
offset += sizeof(p->rspCode);
memcpy((char*)pRsp + offset, p->msg, p->msgLen);
offset += p->msgLen;
taosMemoryFreeClear(p->msg);
}
taosArrayDestroy(batchRsp);
batchRsp = NULL;
_exit:
rspMsg.info = pMsg->info;
rspMsg.pCont = pRsp;
rspMsg.contLen = rspSize;
rspMsg.code = code;
rspMsg.msgType = pMsg->msgType;
if (code) {
qError("vnd get batch meta failed cause of %s", tstrerror(code));
}
taosArrayDestroyEx(batchRsp, tFreeSBatchRsp);
tmsgSendRsp(&rspMsg);
return code;
}
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
pLoad->vgId = TD_VID(pVnode);
pLoad->syncState = syncGetMyRole(pVnode->sync);

View File

@ -106,7 +106,9 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
code = qWorkerProcessDeleteMsg(&handle, pVnode->pQuery, pMsg, &res);
if (code) goto _err;
if (code) {
goto _err;
}
// malloc and encode
tEncodeSize(tEncodeDeleteRes, &res, size, ret);
@ -296,7 +298,7 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
vTrace("message in fetch queue is processing");
if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META ||
pMsg->msgType == TDMT_VND_TABLE_CFG) &&
pMsg->msgType == TDMT_VND_TABLE_CFG || pMsg->msgType == TDMT_VND_BATCH_META) &&
!vnodeIsLeader(pVnode)) {
vnodeRedirectRpcMsg(pVnode, pMsg);
return 0;
@ -318,9 +320,11 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
case TDMT_SCH_QUERY_HEARTBEAT:
return qWorkerProcessHbMsg(pVnode, pVnode->pQuery, pMsg, 0);
case TDMT_VND_TABLE_META:
return vnodeGetTableMeta(pVnode, pMsg);
return vnodeGetTableMeta(pVnode, pMsg, true);
case TDMT_VND_TABLE_CFG:
return vnodeGetTableCfg(pVnode, pMsg);
return vnodeGetTableCfg(pVnode, pMsg, true);
case TDMT_VND_BATCH_META:
return vnodeGetBatchMeta(pVnode, pMsg);
case TDMT_VND_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_RUN:
@ -993,6 +997,11 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq
SDecoder *pCoder = &(SDecoder){0};
SDeleteRes *pRes = &(SDeleteRes){0};
pRsp->msgType = TDMT_VND_DELETE_RSP;
pRsp->pCont = NULL;
pRsp->contLen = 0;
pRsp->code = TSDB_CODE_SUCCESS;
pRes->uidList = taosArrayInit(0, sizeof(tb_uid_t));
if (pRes->uidList == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@ -1010,6 +1019,15 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq
tDecoderClear(pCoder);
taosArrayDestroy(pRes->uidList);
SVDeleteRsp rsp = {.affectedRows = pRes->affectedRows};
int32_t ret = 0;
tEncodeSize(tEncodeSVDeleteRsp, &rsp, pRsp->contLen, ret);
pRsp->pCont = rpcMallocCont(pRsp->contLen);
SEncoder ec = {0};
tEncoderInit(&ec, pRsp->pCont, pRsp->contLen);
tEncodeSVDeleteRsp(&ec, &rsp);
tEncoderClear(&ec);
return code;
_err:

View File

@ -31,6 +31,7 @@ extern "C" {
#define CTG_DEFAULT_RENT_SECOND 10
#define CTG_DEFAULT_RENT_SLOT_SIZE 10
#define CTG_DEFAULT_MAX_RETRY_TIMES 3
#define CTG_DEFAULT_BATCH_NUM 64
#define CTG_RENT_SLOT_SECOND 1.5
@ -38,6 +39,8 @@ extern "C" {
#define CTG_ERR_CODE_TABLE_NOT_EXIST TSDB_CODE_PAR_TABLE_NOT_EXIST
#define CTG_BATCH_FETCH 1
enum {
CTG_READ = 1,
CTG_WRITE,
@ -200,8 +203,20 @@ typedef struct SCatalog {
SCtgRentMgmt stbRent;
} SCatalog;
typedef struct SCtgBatch {
int32_t batchId;
int32_t msgType;
int32_t msgSize;
SArray* pMsgs;
SRequestConnInfo conn;
char dbFName[TSDB_DB_FNAME_LEN];
SArray* pTaskIds;
} SCtgBatch;
typedef struct SCtgJob {
int64_t refId;
int32_t batchId;
SHashObj* pBatchs;
SArray* pTasks;
int32_t taskDone;
SMetaData jobRes;
@ -236,6 +251,16 @@ typedef struct SCtgMsgCtx {
char* target;
} SCtgMsgCtx;
typedef struct SCtgTaskCallbackParam {
uint64_t queryId;
int64_t refId;
SArray* taskId;
int32_t reqType;
int32_t batchId;
} SCtgTaskCallbackParam;
typedef struct SCtgTask SCtgTask;
typedef int32_t (*ctgSubTaskCbFp)(SCtgTask*);
@ -258,6 +283,7 @@ typedef struct SCtgTask {
SRWLatch lock;
SArray* pParents;
SCtgSubRes subRes;
SHashObj* pBatchs;
} SCtgTask;
typedef int32_t (*ctgInitTaskFp)(SCtgJob*, int32_t, void*);
@ -618,6 +644,7 @@ int32_t ctgGetTbMetaFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SNa
int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, SVgroupInfo *vgroupInfo, STableCfg **out, SCtgTask* pTask);
int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, STableCfg **out, SCtgTask* pTask);
int32_t ctgGetSvrVerFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, char **out, SCtgTask* pTask);
int32_t ctgLaunchBatchs(SCatalog* pCtg, SCtgJob *pJob, SHashObj* pBatchs);
int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const SCatalogReq* pReq, catalogCallback fp, void* param);
int32_t ctgLaunchJob(SCtgJob *pJob);
@ -626,6 +653,9 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
int32_t ctgGetTbCfgCb(SCtgTask *pTask);
void ctgFreeHandle(SCatalog* pCatalog);
void ctgFreeMsgSendParam(void* param);
void ctgFreeBatch(SCtgBatch *pBatch);
void ctgFreeBatchs(SHashObj *pBatchs);
int32_t ctgCloneVgInfo(SDBVgInfo *src, SDBVgInfo **dst);
int32_t ctgCloneMetaOutput(STableMetaOutput *output, STableMetaOutput **pOutput);
int32_t ctgGenerateVgList(SCatalog *pCtg, SHashObj *vgHash, SArray** pList);
@ -642,7 +672,7 @@ int32_t ctgDbVgVersionSearchCompare(const void* key1, const void* key2);
void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput);
int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target);
char * ctgTaskTypeStr(CTG_TASK_TYPE type);
int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, SCtgTask* pTask);
int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, char* dbFName, int32_t vgId);
int32_t ctgCloneTableIndex(SArray* pIndex, SArray** pRes);
void ctgFreeSTableIndex(void *info);
void ctgClearSubTaskRes(SCtgSubRes *pRes);

View File

@ -20,12 +20,6 @@
extern "C" {
#endif
typedef struct SCtgTaskCallbackParam {
uint64_t queryId;
int64_t refId;
uint64_t taskId;
int32_t reqType;
} SCtgTaskCallbackParam;
#ifdef __cplusplus


@ -473,8 +473,15 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const
pJob->tbCfgNum = tbCfgNum;
pJob->svrVerNum = svrVerNum;
pJob->pTasks = taosArrayInit(taskNum, sizeof(SCtgTask));
#if CTG_BATCH_FETCH
pJob->pBatchs = taosHashInit(CTG_DEFAULT_BATCH_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (NULL == pJob->pBatchs) {
ctgError("taosHashInit %d batch failed", CTG_DEFAULT_BATCH_NUM);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
#endif
pJob->pTasks = taosArrayInit(taskNum, sizeof(SCtgTask));
if (NULL == pJob->pTasks) {
ctgError("taosArrayInit %d tasks failed", taskNum);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
@ -560,7 +567,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const
_return:
taosMemoryFreeClear(*job);
ctgFreeJob(*job);
CTG_RET(code);
}
@ -776,7 +783,8 @@ int32_t ctgCallSubCb(SCtgTask *pTask) {
pParent->subRes.code = code;
}
}
pParent->pBatchs = pTask->pBatchs;
CTG_ERR_JRET(pParent->subRes.fp(pParent));
}
@ -872,7 +880,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, ctx->pName, &vgInfo));
ctgDebug("will refresh tbmeta, not supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(ctx->pName), ctx->flag);
ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(ctx->pName), ctx->flag);
ctx->vgId = vgInfo.vgId;
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, ctx->pName, &vgInfo, NULL, pTask));
@ -890,7 +898,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *
return TSDB_CODE_SUCCESS;
}
ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(ctx->pName));
ctgError("no tbmeta got, tbName:%s", tNameGetTableName(ctx->pName));
ctgRemoveTbMetaFromCache(pCtg, ctx->pName, false);
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
@ -1653,6 +1661,7 @@ int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) {
if (CTG_TASK_DONE == pSub->status) {
pTask->subRes.code = pSub->code;
CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].cloneFp)(pSub, &pTask->subRes.res));
pTask->pBatchs = pSub->pBatchs;
CTG_ERR_JRET(pTask->subRes.fp(pTask));
} else {
if (NULL == pSub->pParents) {
@ -1690,6 +1699,7 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
CTG_ERR_RET(ctgSetSubTaskCb(pSub, pTask));
if (newTask) {
pSub->pBatchs = pTask->pBatchs;
CTG_ERR_RET((*gCtgAsyncFps[pSub->type].launchFp)(pSub));
pSub->status = CTG_TASK_LAUNCHED;
}
@ -1702,9 +1712,11 @@ int32_t ctgLaunchJob(SCtgJob *pJob) {
for (int32_t i = 0; i < taskNum; ++i) {
SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
pTask->pBatchs = pJob->pBatchs;
qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask));
pTask->status = CTG_TASK_LAUNCHED;
}
@ -1712,6 +1724,10 @@ int32_t ctgLaunchJob(SCtgJob *pJob) {
qDebug("QID:0x%" PRIx64 " ctg call user callback with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));
taosAsyncExec(ctgCallUserCb, pJob, NULL);
#if CTG_BATCH_FETCH
} else {
ctgLaunchBatchs(pJob->pCtg, pJob, pJob->pBatchs);
#endif
}
return TSDB_CODE_SUCCESS;


@ -18,9 +18,67 @@
#include "tname.h"
#include "catalogInt.h"
#include "systable.h"
#include "ctgRemote.h"
#include "tref.h"
int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBuf *pMsg, int32_t rspCode) {
int32_t code = 0;
SArray* pTaskId = cbParam->taskId;
SCatalog* pCtg = pJob->pCtg;
int32_t taskNum = taosArrayGetSize(pTaskId);
SDataBuf taskMsg = *pMsg;
int32_t offset = 0;
int32_t msgNum = (TSDB_CODE_SUCCESS == rspCode && pMsg->pData && (pMsg->len > 0)) ? ntohl(*(int32_t*)pMsg->pData) : 0;
ASSERT(taskNum == msgNum || 0 == msgNum);
ctgDebug("QID:0x%" PRIx64 " ctg got batch %d rsp %s", pJob->queryId, cbParam->batchId, TMSG_INFO(cbParam->reqType + 1));
offset += sizeof(msgNum);
SBatchRsp rsp = {0};
SHashObj* pBatchs = taosHashInit(taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (NULL == pBatchs) {
ctgError("taosHashInit %d batch failed", taskNum);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
for (int32_t i = 0; i < taskNum; ++i) {
int32_t* taskId = taosArrayGet(pTaskId, i);
SCtgTask *pTask = taosArrayGet(pJob->pTasks, *taskId);
if (msgNum > 0) {
rsp.reqType = ntohl(*(int32_t*)((char*)pMsg->pData + offset));
offset += sizeof(rsp.reqType);
rsp.msgLen = ntohl(*(int32_t*)((char*)pMsg->pData + offset));
offset += sizeof(rsp.msgLen);
rsp.rspCode = ntohl(*(int32_t*)((char*)pMsg->pData + offset));
offset += sizeof(rsp.rspCode);
rsp.msg = ((char*)pMsg->pData) + offset;
offset += rsp.msgLen;
taskMsg.msgType = rsp.reqType;
taskMsg.pData = rsp.msg;
taskMsg.len = rsp.msgLen;
} else {
rsp.reqType = -1;
taskMsg.msgType = -1;
taskMsg.pData = NULL;
taskMsg.len = 0;
}
pTask->pBatchs = pBatchs;
ctgDebug("QID:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(taskMsg.msgType + 1));
(*gCtgAsyncFps[pTask->type].handleRspFp)(pTask, rsp.reqType, &taskMsg, (rsp.rspCode ? rsp.rspCode : rspCode));
}
CTG_ERR_JRET(ctgLaunchBatchs(pJob->pCtg, pJob, pBatchs));
_return:
ctgFreeBatchs(pBatchs);
CTG_RET(code);
}
int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize, int32_t rspCode, char* target) {
int32_t code = 0;
@ -233,6 +291,11 @@ int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize,
break;
}
default:
if (TSDB_CODE_SUCCESS != rspCode) {
qError("Got error rsp, error:%s", tstrerror(rspCode));
CTG_ERR_RET(rspCode);
}
qError("invalid req type %s", TMSG_INFO(reqType));
return TSDB_CODE_APP_ERROR;
}
@ -254,12 +317,32 @@ int32_t ctgHandleMsgCallback(void *param, SDataBuf *pMsg, int32_t rspCode) {
goto _return;
}
SCtgTask *pTask = taosArrayGet(pJob->pTasks, cbParam->taskId);
SCatalog* pCtg = pJob->pCtg;
qDebug("QID:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(cbParam->reqType + 1));
if (TDMT_VND_BATCH_META == cbParam->reqType || TDMT_MND_BATCH_META == cbParam->reqType) {
CTG_ERR_JRET(ctgHandleBatchRsp(pJob, cbParam, pMsg, rspCode));
} else {
int32_t *taskId = taosArrayGet(cbParam->taskId, 0);
SCtgTask *pTask = taosArrayGet(pJob->pTasks, *taskId);
qDebug("QID:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(cbParam->reqType + 1));
#if CTG_BATCH_FETCH
SHashObj* pBatchs = taosHashInit(CTG_DEFAULT_BATCH_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (NULL == pBatchs) {
ctgError("taosHashInit %d batch failed", CTG_DEFAULT_BATCH_NUM);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
pTask->pBatchs = pBatchs;
#endif
CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].handleRspFp)(pTask, cbParam->reqType, pMsg, rspCode));
#if CTG_BATCH_FETCH
CTG_ERR_JRET(ctgLaunchBatchs(pJob->pCtg, pJob, pBatchs));
#endif
}
CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].handleRspFp)(pTask, cbParam->reqType, pMsg, rspCode));
_return:
taosMemoryFree(pMsg->pData);
@ -272,12 +355,12 @@ _return:
}
int32_t ctgMakeMsgSendInfo(SCtgTask* pTask, int32_t msgType, SMsgSendInfo **pMsgSendInfo) {
int32_t ctgMakeMsgSendInfo(SCtgJob* pJob, SArray* pTaskId, int32_t batchId, int32_t msgType, SMsgSendInfo **pMsgSendInfo) {
int32_t code = 0;
SMsgSendInfo *msgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (NULL == msgSendInfo) {
qError("calloc %d failed", (int32_t)sizeof(SMsgSendInfo));
CTG_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
CTG_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SCtgTaskCallbackParam *param = taosMemoryCalloc(1, sizeof(SCtgTaskCallbackParam));
@ -287,12 +370,13 @@ int32_t ctgMakeMsgSendInfo(SCtgTask* pTask, int32_t msgType, SMsgSendInfo **pMsg
}
param->reqType = msgType;
param->queryId = pTask->pJob->queryId;
param->refId = pTask->pJob->refId;
param->taskId = pTask->taskId;
param->queryId = pJob->queryId;
param->refId = pJob->refId;
param->taskId = pTaskId;
param->batchId = batchId;
msgSendInfo->param = param;
msgSendInfo->paramFreeFp = taosMemoryFree;
msgSendInfo->paramFreeFp = ctgFreeMsgSendParam;
msgSendInfo->fp = ctgHandleMsgCallback;
*pMsgSendInfo = msgSendInfo;
@ -301,18 +385,19 @@ int32_t ctgMakeMsgSendInfo(SCtgTask* pTask, int32_t msgType, SMsgSendInfo **pMsg
_return:
taosMemoryFree(param);
taosMemoryFree(msgSendInfo);
taosArrayDestroy(pTaskId);
destroySendMsgInfo(msgSendInfo);
CTG_RET(code);
}
int32_t ctgAsyncSendMsg(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTask* pTask, int32_t msgType, void *msg, uint32_t msgSize) {
int32_t ctgAsyncSendMsg(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob* pJob, SArray* pTaskId,
int32_t batchId, char* dbFName, int32_t vgId, int32_t msgType, void *msg, uint32_t msgSize) {
int32_t code = 0;
SMsgSendInfo *pMsgSendInfo = NULL;
CTG_ERR_JRET(ctgMakeMsgSendInfo(pTask, msgType, &pMsgSendInfo));
CTG_ERR_JRET(ctgMakeMsgSendInfo(pJob, pTaskId, batchId, msgType, &pMsgSendInfo));
ctgUpdateSendTargetInfo(pMsgSendInfo, msgType, pTask);
ctgUpdateSendTargetInfo(pMsgSendInfo, msgType, dbFName, vgId);
pMsgSendInfo->requestId = pConn->requestId;
pMsgSendInfo->requestObjRefId = pConn->requestObjRefId;
@ -323,24 +408,178 @@ int32_t ctgAsyncSendMsg(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTask* pTask
int64_t transporterId = 0;
code = asyncSendMsgToServer(pConn->pTrans, &pConn->mgmtEps, &transporterId, pMsgSendInfo);
pMsgSendInfo = NULL;
if (code) {
ctgError("asyncSendMsgToSever failed, error: %s", tstrerror(code));
CTG_ERR_JRET(code);
}
ctgDebug("ctg req msg sent, reqId:0x%" PRIx64 ", msg type:%d, %s", pTask->pJob->queryId, msgType, TMSG_INFO(msgType));
ctgDebug("ctg req msg sent, reqId:0x%" PRIx64 ", msg type:%d, %s", pJob->queryId, msgType, TMSG_INFO(msgType));
return TSDB_CODE_SUCCESS;
_return:
if (pMsgSendInfo) {
taosMemoryFreeClear(pMsgSendInfo->param);
taosMemoryFreeClear(pMsgSendInfo);
destroySendMsgInfo(pMsgSendInfo);
}
CTG_RET(code);
}
int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo *pConn, SCtgTask* pTask, int32_t msgType, void *msg, uint32_t msgSize) {
int32_t code = 0;
SHashObj* pBatchs = pTask->pBatchs;
SCtgJob* pJob = pTask->pJob;
SCtgBatch* pBatch = taosHashGet(pBatchs, &vgId, sizeof(vgId));
int32_t taskNum = taosArrayGetSize(pTask->pJob->pTasks);
SCtgBatch newBatch = {0};
SBatchMsg req = {0};
if (NULL == pBatch) {
newBatch.pMsgs = taosArrayInit(taskNum, sizeof(SBatchMsg));
newBatch.pTaskIds = taosArrayInit(taskNum, sizeof(int32_t));
if (NULL == newBatch.pMsgs || NULL == newBatch.pTaskIds) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
newBatch.conn = *pConn;
req.msgType = msgType;
req.msgLen = msgSize;
req.msg = msg;
if (NULL == taosArrayPush(newBatch.pMsgs, &req)) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
if (NULL == taosArrayPush(newBatch.pTaskIds, &pTask->taskId)) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
newBatch.msgSize = sizeof(SBatchReq) + sizeof(req) + msgSize - POINTER_BYTES;
if (vgId > 0) {
if (TDMT_VND_TABLE_CFG == msgType) {
SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx;
tNameGetFullDbName(ctx->pName, newBatch.dbFName);
} else if (TDMT_VND_TABLE_META == msgType) {
SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
tNameGetFullDbName(ctx->pName, newBatch.dbFName);
} else {
ctgError("invalid vnode msgType %d", msgType);
CTG_ERR_JRET(TSDB_CODE_APP_ERROR);
}
}
newBatch.msgType = (vgId > 0) ? TDMT_VND_BATCH_META : TDMT_MND_BATCH_META;
newBatch.batchId = atomic_add_fetch_32(&pJob->batchId, 1);
if (0 != taosHashPut(pBatchs, &vgId, sizeof(vgId), &newBatch, sizeof(newBatch))) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
ctgDebug("task %d %s req added to batch %d, target vgId %d", pTask->taskId, TMSG_INFO(msgType), newBatch.batchId, vgId);
return TSDB_CODE_SUCCESS;
}
req.msgType = msgType;
req.msgLen = msgSize;
req.msg = msg;
if (NULL == taosArrayPush(pBatch->pMsgs, &req)) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
if (NULL == taosArrayPush(pBatch->pTaskIds, &pTask->taskId)) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
pBatch->msgSize += sizeof(req) + msgSize - POINTER_BYTES;
if (vgId > 0) {
if (TDMT_VND_TABLE_CFG == msgType) {
SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx;
tNameGetFullDbName(ctx->pName, newBatch.dbFName);
} else if (TDMT_VND_TABLE_META == msgType) {
SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
tNameGetFullDbName(ctx->pName, newBatch.dbFName);
} else {
ctgError("invalid vnode msgType %d", msgType);
CTG_ERR_JRET(TSDB_CODE_APP_ERROR);
}
}
ctgDebug("task %d %s req added to batch %d, target vgId %d", pTask->taskId, TMSG_INFO(msgType), pBatch->batchId, vgId);
return TSDB_CODE_SUCCESS;
_return:
ctgFreeBatch(&newBatch);
taosMemoryFree(msg);
return code;
}
int32_t ctgBuildBatchReqMsg(SCtgBatch* pBatch, int32_t vgId, void** msg) {
*msg = taosMemoryMalloc(pBatch->msgSize);
if (NULL == (*msg)) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
int32_t offset = 0;
int32_t num = taosArrayGetSize(pBatch->pMsgs);
SBatchReq *pBatchReq = (SBatchReq*)(*msg);
pBatchReq->header.vgId = htonl(vgId);
pBatchReq->msgNum = htonl(num);
offset += sizeof(SBatchReq);
for (int32_t i = 0; i < num; ++i) {
SBatchMsg* pReq = taosArrayGet(pBatch->pMsgs, i);
*(int32_t*)((char*)(*msg) + offset) = htonl(pReq->msgType);
offset += sizeof(pReq->msgType);
*(int32_t*)((char*)(*msg) + offset) = htonl(pReq->msgLen);
offset += sizeof(pReq->msgLen);
memcpy((char*)(*msg) + offset, pReq->msg, pReq->msgLen);
offset += pReq->msgLen;
}
ASSERT(pBatch->msgSize == offset);
qDebug("batch req %d to vg %d msg built with %d meta reqs", pBatch->batchId, vgId, num);
return TSDB_CODE_SUCCESS;
}
int32_t ctgLaunchBatchs(SCatalog* pCtg, SCtgJob *pJob, SHashObj* pBatchs) {
int32_t code = 0;
void* msg = NULL;
void* p = taosHashIterate(pBatchs, NULL);
while (NULL != p) {
size_t len = 0;
int32_t* vgId = taosHashGetKey(p, &len);
SCtgBatch* pBatch = (SCtgBatch*)p;
ctgDebug("QID:0x%" PRIx64 " ctg start to launch batch %d", pJob->queryId, pBatch->batchId);
CTG_ERR_JRET(ctgBuildBatchReqMsg(pBatch, *vgId, &msg));
code = ctgAsyncSendMsg(pCtg, &pBatch->conn, pJob, pBatch->pTaskIds, pBatch->batchId,
pBatch->dbFName, *vgId, pBatch->msgType, msg, pBatch->msgSize);
pBatch->pTaskIds = NULL;
CTG_ERR_JRET(code);
p = taosHashIterate(pBatchs, p);
}
return TSDB_CODE_SUCCESS;
_return:
if (p) {
taosHashCancelIterate(pBatchs, p);
}
taosMemoryFree(msg);
CTG_RET(code);
}
int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray *out, SCtgTask* pTask) {
char *msg = NULL;
int32_t msgLen = 0;
@ -361,7 +600,18 @@ int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, NULL));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -396,7 +646,18 @@ int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray
if (pTask) {
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, NULL, NULL));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -436,8 +697,18 @@ int32_t ctgGetDBVgInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SBuildU
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, input->db));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -476,8 +747,18 @@ int32_t ctgGetDBCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)dbFName));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -516,8 +797,18 @@ int32_t ctgGetIndexInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)indexName));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -559,8 +850,18 @@ int32_t ctgGetTbIndexFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SName *n
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)tbFName));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -599,8 +900,18 @@ int32_t ctgGetUdfInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const ch
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)funcName));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -639,8 +950,18 @@ int32_t ctgGetUserDbAuthFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, (char*)user));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -684,8 +1005,17 @@ int32_t ctgGetTbMetaFromMnodeImpl(SCatalog* pCtg, SRequestConnInfo *pConn, char
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, pOut, tbFName));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -744,7 +1074,21 @@ int32_t ctgGetTbMetaFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SNa
.requestId = pConn->requestId,
.requestObjRefId = pConn->requestObjRefId,
.mgmtEps = vgroupInfo->epSet};
CTG_RET(ctgAsyncSendMsg(pCtg, &vConn, pTask, reqType, msg, msgLen));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, vgroupInfo->vgId, &vConn, pTask, reqType, msg, msgLen));
#else
SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(ctx->pName, dbFName);
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, &vConn, pTask->pJob, pTaskId, -1, dbFName, ctx->vgId, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -791,7 +1135,20 @@ int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const S
.requestId = pConn->requestId,
.requestObjRefId = pConn->requestObjRefId,
.mgmtEps = vgroupInfo->epSet};
CTG_RET(ctgAsyncSendMsg(pCtg, &vConn, pTask, reqType, msg, msgLen));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, vgroupInfo->vgId, &vConn, pTask, reqType, msg, msgLen));
#else
SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx;
char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(ctx->pName, dbFName);
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, &vConn, pTask->pJob, pTaskId, -1, dbFName, ctx->pVgInfo->vgId, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -832,8 +1189,17 @@ int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const S
if (pTask) {
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, NULL, (char*)tbFName));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {
@ -868,8 +1234,18 @@ int32_t ctgGetSvrVerFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, char **ou
if (pTask) {
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, NULL, NULL));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, pTask, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
if (NULL == pTaskId) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
taosArrayPush(pTaskId, &pTask->taskId);
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask->pJob, pTaskId, -1, NULL, 0, reqType, msg, msgLen));
#endif
}
SRpcMsg rpcMsg = {


@ -19,6 +19,39 @@
#include "catalogInt.h"
#include "systable.h"
void ctgFreeMsgSendParam(void* param) {
if (NULL == param) {
return;
}
SCtgTaskCallbackParam* pParam = (SCtgTaskCallbackParam*)param;
taosArrayDestroy(pParam->taskId);
taosMemoryFree(param);
}
void ctgFreeBatch(SCtgBatch *pBatch) {
if (NULL == pBatch) {
return;
}
taosArrayDestroy(pBatch->pMsgs);
taosArrayDestroy(pBatch->pTaskIds);
}
void ctgFreeBatchs(SHashObj *pBatchs) {
void* p = taosHashIterate(pBatchs, NULL);
while (NULL != p) {
SCtgBatch* pBatch = (SCtgBatch*)p;
ctgFreeBatch(pBatch);
p = taosHashIterate(pBatchs, p);
}
taosHashCleanup(pBatchs);
}
char *ctgTaskTypeStr(CTG_TASK_TYPE type) {
switch (type) {
case CTG_TASK_GET_QNODE:
@ -612,6 +645,7 @@ void ctgFreeJob(void* job) {
uint64_t qid = pJob->queryId;
ctgFreeTasks(pJob->pTasks);
ctgFreeBatchs(pJob->pBatchs);
ctgFreeSMetaData(&pJob->jobRes);
@ -867,14 +901,10 @@ int32_t ctgCloneTableIndex(SArray* pIndex, SArray** pRes) {
}
int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, SCtgTask* pTask) {
if (msgType == TDMT_VND_TABLE_META) {
SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(ctx->pName, dbFName);
int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, char* dbFName, int32_t vgId) {
if (msgType == TDMT_VND_TABLE_META || msgType == TDMT_VND_TABLE_CFG || msgType == TDMT_VND_BATCH_META) {
pMsgSendInfo->target.type = TARGET_TYPE_VNODE;
pMsgSendInfo->target.vgId = ctx->vgId;
pMsgSendInfo->target.vgId = vgId;
pMsgSendInfo->target.dbFName = strdup(dbFName);
} else {
pMsgSendInfo->target.type = TARGET_TYPE_MNODE;


@ -468,7 +468,6 @@ typedef struct SStreamScanInfo {
SSDataBlock* pUpdateDataRes;
// status for tmq
// SSchemaWrapper schema;
STqOffset offset;
SNodeList* pGroupTags;
SNode* pTagCond;
SNode* pTagIndexCond;
@ -1021,6 +1020,7 @@ void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsCol
int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* groupKey);
SSDataBlock* createSpecialDataBlock(EStreamType type);
void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput);
#ifdef __cplusplus
}


@ -90,6 +90,8 @@ static void toDataCacheEntry(SDataDeleterHandle* pHandle, const SInputData* pInp
pRes->uidList = pHandle->pParam->pUidList;
pRes->skey = pHandle->pDeleter->deleteTimeRange.skey;
pRes->ekey = pHandle->pDeleter->deleteTimeRange.ekey;
strcpy(pRes->tableName, pHandle->pDeleter->tableFName);
strcpy(pRes->tsColName, pHandle->pDeleter->tsColName);
pRes->affectedRows = *(int64_t*)pColRes->pData;
pBuf->useSize += pEntry->dataLen;


@ -193,7 +193,7 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) {
pBlock->info.calWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
for (int32_t i = 0; i < numOfCols; ++i) {
SSlotDescNode* pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i);
SSlotDescNode* pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i);
SColumnInfoData idata =
createColumnInfoData(pDescNode->dataType.type, pDescNode->dataType.bytes, pDescNode->slotId);
idata.info.scale = pDescNode->dataType.scale;
@ -267,8 +267,9 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle,
code = metaGetTableEntryByUid(&mr, info->uid);
if (TSDB_CODE_SUCCESS != code) {
metaReaderClear(&mr);
*pQualified = false;
return terrno;
return TSDB_CODE_SUCCESS;
}
SNode* pTagCondTmp = nodesCloneNode(pTagCond);
@ -387,7 +388,7 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) {
}
int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId) {
SMetaReader mr = {0};
SMetaReader mr = {0};
metaReaderInit(&mr, pMeta, 0);
metaGetTableEntryByUid(&mr, uid);
@ -395,7 +396,7 @@ int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode,
nodesRewriteExprsPostOrder(groupNew, doTranslateTagExpr, &mr);
char* isNull = (char*)keyBuf;
char* pStart = (char*)keyBuf + sizeof(int8_t)*LIST_LENGTH(pGroupNode);
char* pStart = (char*)keyBuf + sizeof(int8_t) * LIST_LENGTH(pGroupNode);
SNode* pNode;
int32_t index = 0;
@ -441,7 +442,7 @@ int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode,
}
}
int32_t len = (int32_t)(pStart - (char*)keyBuf);
int32_t len = (int32_t)(pStart - (char*)keyBuf);
*pGroupId = calcGroupId(keyBuf, len);
nodesDestroyList(groupNew);


@ -14,12 +14,12 @@
*/
#include "executor.h"
#include "tref.h"
#include "executorimpl.h"
#include "planner.h"
#include "tdatablock.h"
#include "vnode.h"
#include "tref.h"
#include "tudf.h"
#include "vnode.h"
static TdThreadOnce initPoolOnce = PTHREAD_ONCE_INIT;
int32_t exchangeObjRefPool = -1;
@ -30,8 +30,7 @@ static void cleanupRefPool() {
taosCloseRef(ref);
}
static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, bool assignUid,
char* id) {
static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, char* id) {
ASSERT(pOperator != NULL);
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
@ -44,12 +43,12 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
return TSDB_CODE_QRY_APP_ERROR;
}
pOperator->status = OP_NOT_OPENED;
return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, assignUid, id);
return doSetStreamBlock(pOperator->pDownstream[0], input, numOfBlocks, type, id);
} else {
pOperator->status = OP_NOT_OPENED;
SStreamScanInfo* pInfo = pOperator->info;
pInfo->assignBlockUid = assignUid;
/*pInfo->assignBlockUid = assignUid;*/
// TODO: if a block was set but not consumed,
// prevent setting a different type of block
@ -95,21 +94,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
}
}
#if 0
int32_t qStreamScanSnapshot(qTaskInfo_t tinfo) {
if (tinfo == NULL) {
return TSDB_CODE_QRY_APP_ERROR;
}
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
return doSetStreamBlock(pTaskInfo->pRoot, NULL, 0, STREAM_INPUT__TABLE_SCAN, 0, NULL);
}
#endif
int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid) {
return qSetMultiStreamInput(tinfo, input, 1, type, assignUid);
}
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type, bool assignUid) {
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
if (tinfo == NULL) {
return TSDB_CODE_QRY_APP_ERROR;
}
@ -120,8 +105,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
int32_t code =
doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, assignUid, GET_TASKID(pTaskInfo));
int32_t code = doSetStreamBlock(pTaskInfo->pRoot, (void**)pBlocks, numOfBlocks, type, GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
qError("%s failed to set the stream block data", GET_TASKID(pTaskInfo));
} else {
@ -258,8 +242,8 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}
// todo refactor STableList
size_t bufLen = (pScanInfo->pGroupTags != NULL)? getTableTagsBufLen(pScanInfo->pGroupTags):0;
char* keyBuf = NULL;
size_t bufLen = (pScanInfo->pGroupTags != NULL) ? getTableTagsBufLen(pScanInfo->pGroupTags) : 0;
char* keyBuf = NULL;
if (bufLen > 0) {
keyBuf = taosMemoryMalloc(bufLen);
if (keyBuf == NULL) {
@ -267,13 +251,13 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}
}
for(int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
for (int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
uint64_t* uid = taosArrayGet(qa, i);
STableKeyInfo keyInfo = {.uid = *uid, .groupId = 0};
if (bufLen > 0) {
code = getGroupIdFromTagsVal(pScanInfo->readHandle.meta, keyInfo.uid, pScanInfo->pGroupTags, keyBuf,
&keyInfo.groupId);
&keyInfo.groupId);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -347,9 +331,12 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
}
code = dsCreateDataSinker(pSubplan->pDataSink, handle, pSinkParam);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pSinkParam);
}
}
_error:
_error:
// if failed to add ref for all tables in this query, abort current query
return code;
}
@ -573,11 +560,6 @@ const SSchemaWrapper* qExtractSchemaFromStreamScanner(void* scanner) {
return pInfo->tqReader->pSchemaWrapper;
}
const STqOffset* qExtractStatusFromStreamScanner(void* scanner) {
SStreamScanInfo* pInfo = scanner;
return &pInfo->offset;
}
void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
@ -600,12 +582,17 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
while (1) {
uint8_t type = pOperator->operatorType;
pOperator->status = OP_OPENED;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pInfo = pOperator->info;
if (pOffset->type == TMQ_OFFSET__LOG) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTSInfo->dataReader);
pTSInfo->dataReader = NULL;
// TODO add more check
if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
ASSERT(pOperator->numOfDownstream == 1);
pOperator = pOperator->pDownstream[0];
}
SStreamScanInfo* pInfo = pOperator->info;
if (pOffset->type == TMQ_OFFSET__LOG) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTSInfo->dataReader);
pTSInfo->dataReader = NULL;
#if 0
if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
@ -614,102 +601,74 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
ASSERT(0);
}
#endif
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
return -1;
}
ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
/*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
int64_t uid = pOffset->uid;
int64_t ts = pOffset->ts;
if (uid == 0) {
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
} else {
return -1;
}
ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
/*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
int64_t uid = pOffset->uid;
int64_t ts = pOffset->ts;
}
if (uid == 0) {
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
} else {
return -1;
}
}
/*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
/*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
/*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
/*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
#ifndef NDEBUG
qDebug("switch to next table %ld (cursor %d), %ld rows returned", uid, pTableScanInfo->currentTable,
pInfo->pTableScanOp->resultInfo.totalRows);
pInfo->pTableScanOp->resultInfo.totalRows = 0;
qDebug("switch to next table %ld (cursor %d), %ld rows returned", uid, pTableScanInfo->currentTable,
pInfo->pTableScanOp->resultInfo.totalRows);
pInfo->pTableScanOp->resultInfo.totalRows = 0;
#endif
bool found = false;
for (int32_t i = 0; i < tableSz; i++) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
if (pTableInfo->uid == uid) {
found = true;
pTableScanInfo->currentTable = i;
break;
}
bool found = false;
for (int32_t i = 0; i < tableSz; i++) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
if (pTableInfo->uid == uid) {
found = true;
pTableScanInfo->currentTable = i;
break;
}
// TODO after dropping table, table may be not found
ASSERT(found);
if (pTableScanInfo->dataReader == NULL) {
if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
pTableScanInfo->dataReader == NULL) {
ASSERT(0);
}
}
tsdbSetTableId(pTableScanInfo->dataReader, uid);
int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
pTableScanInfo->cond.twindows.skey = ts + 1;
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
pTableScanInfo->cond.twindows.skey = oldSkey;
pTableScanInfo->scanTimes = 0;
qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts,
pTableScanInfo->currentTable, tableSz);
/*}*/
} else {
ASSERT(0);
}
return 0;
// TODO after dropping table, table may be not found
ASSERT(found);
if (pTableScanInfo->dataReader == NULL) {
if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
pTableScanInfo->dataReader == NULL) {
ASSERT(0);
}
}
tsdbSetTableId(pTableScanInfo->dataReader, uid);
int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
pTableScanInfo->cond.twindows.skey = ts + 1;
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
pTableScanInfo->cond.twindows.skey = oldSkey;
pTableScanInfo->scanTimes = 0;
qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts,
pTableScanInfo->currentTable, tableSz);
/*}*/
} else {
ASSERT(pOperator->numOfDownstream == 1);
pOperator = pOperator->pDownstream[0];
ASSERT(0);
}
return 0;
}
}
return 0;
}
#if 0
int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
if (uid == 0) {
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
uid = pTableInfo->uid;
ts = INT64_MIN;
}
}
return doPrepareScan(pTaskInfo->pRoot, uid, ts);
}
int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
return doGetScanStatus(pTaskInfo->pRoot, uid, ts);
}
#endif


@ -3209,9 +3209,8 @@ static void doHandleRemainBlockFromNewGroup(SFillOperatorInfo* pInfo, SResultInf
if (taosFillHasMoreResults(pInfo->pFillInfo)) {
int32_t numOfResultRows = pResultInfo->capacity - pInfo->pRes->info.rows;
taosFillResultDataBlock(pInfo->pFillInfo, pInfo->pRes, numOfResultRows);
if (pInfo->pRes->info.rows > pResultInfo->threshold) {
return;
}
pInfo->pRes->info.groupId = pInfo->curGroupId;
return;
}
// handle the cached new group data block
@ -3230,7 +3229,7 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
blockDataCleanup(pResBlock);
doHandleRemainBlockFromNewGroup(pInfo, pResultInfo, pTaskInfo);
if (pResBlock->info.rows > pResultInfo->threshold || pResBlock->info.rows > 0) {
if (pResBlock->info.rows > 0) {
pResBlock->info.groupId = pInfo->curGroupId;
return pResBlock;
}
@ -3436,7 +3435,7 @@ void initBasicInfo(SOptrBasicInfo* pInfo, SSDataBlock* pBlock) {
initResultRowInfo(&pInfo->resultRowInfo);
}
static void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
if (pCtx == NULL) {
return NULL;
}


@ -83,8 +83,6 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
} else if (pJoinNode->inputTsOrder == ORDER_DESC) {
pInfo->inputTsOrder = TSDB_ORDER_DESC;
}
//TODO: remove this when JoinNode inputTsOrder is ready
pInfo->inputTsOrder = TSDB_ORDER_ASC;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doMergeJoin, NULL, NULL, destroyMergeJoinOperator, NULL, NULL, NULL);
@ -116,7 +114,9 @@ void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
static void doJoinOneRow(struct SOperatorInfo* pOperator, SSDataBlock* pRes, int32_t currRow) {
static void mergeJoinJoinLeftRight(struct SOperatorInfo* pOperator, SSDataBlock* pRes, int32_t currRow,
SSDataBlock* pLeftBlock, int32_t leftPos, SSDataBlock* pRightBlock, int32_t rightPos) {
SJoinOperatorInfo* pJoinInfo = pOperator->info;
for (int32_t i = 0; i < pOperator->exprSupp.numOfExprs; ++i) {
@ -130,11 +130,11 @@ static void doJoinOneRow(struct SOperatorInfo* pOperator, SSDataBlock* pRes, int
SColumnInfoData* pSrc = NULL;
if (pJoinInfo->pLeft->info.blockId == blockId) {
pSrc = taosArrayGet(pJoinInfo->pLeft->pDataBlock, slotId);
rowIndex = pJoinInfo->leftPos;
pSrc = taosArrayGet(pLeftBlock->pDataBlock, slotId);
rowIndex = leftPos;
} else {
pSrc = taosArrayGet(pJoinInfo->pRight->pDataBlock, slotId);
rowIndex = pJoinInfo->rightPos;
pSrc = taosArrayGet(pRightBlock->pDataBlock, slotId);
rowIndex = rightPos;
}
if (colDataIsNull_s(pSrc, rowIndex)) {
@ -146,6 +146,45 @@ static void doJoinOneRow(struct SOperatorInfo* pOperator, SSDataBlock* pRes, int
}
}
static bool mergeJoinGetNextTimestamp(SOperatorInfo* pOperator, int64_t* pLeftTs, int64_t* pRightTs) {
SJoinOperatorInfo* pJoinInfo = pOperator->info;
if (pJoinInfo->pLeft == NULL || pJoinInfo->leftPos >= pJoinInfo->pLeft->info.rows) {
SOperatorInfo* ds1 = pOperator->pDownstream[0];
pJoinInfo->pLeft = ds1->fpSet.getNextFn(ds1);
pJoinInfo->leftPos = 0;
if (pJoinInfo->pLeft == NULL) {
setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
return false;
}
}
if (pJoinInfo->pRight == NULL || pJoinInfo->rightPos >= pJoinInfo->pRight->info.rows) {
SOperatorInfo* ds2 = pOperator->pDownstream[1];
pJoinInfo->pRight = ds2->fpSet.getNextFn(ds2);
pJoinInfo->rightPos = 0;
if (pJoinInfo->pRight == NULL) {
setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
return false;
}
}
// only the timestamp match support for ordinary table
SColumnInfoData* pLeftCol = taosArrayGet(pJoinInfo->pLeft->pDataBlock, pJoinInfo->leftCol.slotId);
char* pLeftVal = colDataGetData(pLeftCol, pJoinInfo->leftPos);
*pLeftTs = *(int64_t*)pLeftVal;
SColumnInfoData* pRightCol = taosArrayGet(pJoinInfo->pRight->pDataBlock, pJoinInfo->rightCol.slotId);
char* pRightVal = colDataGetData(pRightCol, pJoinInfo->rightPos);
*pRightTs = *(int64_t*)pRightVal;
ASSERT(pLeftCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
ASSERT(pRightCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
return true;
}
static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes) {
SJoinOperatorInfo* pJoinInfo = pOperator->info;
@ -154,52 +193,27 @@ static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes)
bool asc = (pJoinInfo->inputTsOrder == TSDB_ORDER_ASC) ? true : false;
while (1) {
// todo extract method
if (pJoinInfo->pLeft == NULL || pJoinInfo->leftPos >= pJoinInfo->pLeft->info.rows) {
SOperatorInfo* ds1 = pOperator->pDownstream[0];
pJoinInfo->pLeft = ds1->fpSet.getNextFn(ds1);
pJoinInfo->leftPos = 0;
if (pJoinInfo->pLeft == NULL) {
setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
break;
}
int64_t leftTs = 0;
int64_t rightTs = 0;
bool hasNextTs = mergeJoinGetNextTimestamp(pOperator, &leftTs, &rightTs);
if (!hasNextTs) {
break;
}
if (pJoinInfo->pRight == NULL || pJoinInfo->rightPos >= pJoinInfo->pRight->info.rows) {
SOperatorInfo* ds2 = pOperator->pDownstream[1];
pJoinInfo->pRight = ds2->fpSet.getNextFn(ds2);
pJoinInfo->rightPos = 0;
if (pJoinInfo->pRight == NULL) {
setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
break;
}
}
SColumnInfoData* pLeftCol = taosArrayGet(pJoinInfo->pLeft->pDataBlock, pJoinInfo->leftCol.slotId);
char* pLeftVal = colDataGetData(pLeftCol, pJoinInfo->leftPos);
SColumnInfoData* pRightCol = taosArrayGet(pJoinInfo->pRight->pDataBlock, pJoinInfo->rightCol.slotId);
char* pRightVal = colDataGetData(pRightCol, pJoinInfo->rightPos);
// only the timestamp match support for ordinary table
ASSERT(pLeftCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
if (*(int64_t*)pLeftVal == *(int64_t*)pRightVal) {
doJoinOneRow(pOperator, pRes, nrows);
if (leftTs == rightTs) {
mergeJoinJoinLeftRight(pOperator, pRes, nrows,
pJoinInfo->pLeft, pJoinInfo->leftPos, pJoinInfo->pRight, pJoinInfo->rightPos);
pJoinInfo->leftPos += 1;
pJoinInfo->rightPos += 1;
nrows += 1;
} else if (asc && *(int64_t*)pLeftVal < *(int64_t*)pRightVal ||
!asc && *(int64_t*)pLeftVal > *(int64_t*)pRightVal) {
} else if (asc && leftTs < rightTs || !asc && leftTs > rightTs) {
pJoinInfo->leftPos += 1;
if (pJoinInfo->leftPos >= pJoinInfo->pLeft->info.rows) {
continue;
}
} else if (asc && *(int64_t*)pLeftVal > *(int64_t*)pRightVal ||
!asc && *(int64_t*)pLeftVal < *(int64_t*)pRightVal) {
} else if (asc && leftTs > rightTs || !asc && leftTs < rightTs) {
pJoinInfo->rightPos += 1;
if (pJoinInfo->rightPos >= pJoinInfo->pRight->info.rows) {
continue;


@ -292,7 +292,9 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
// do apply filter
doFilter(pProjectInfo->pFilterNode, pFinalRes, NULL);
if (pFinalRes->info.rows > 0 || pRes->info.rows == 0) {
// when apply the limit/offset for each group, pRes->info.rows may be 0, due to limit constraint.
if (pFinalRes->info.rows > 0 || (pOperator->status == OP_EXEC_DONE)) {
break;
}
} else {


@ -1396,13 +1396,11 @@ SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNo
static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
SStreamScanInfo* pStreamScan = (SStreamScanInfo*)param;
#if 1
if (pStreamScan->pTableScanOp && pStreamScan->pTableScanOp->info) {
STableScanInfo* pTableScanInfo = pStreamScan->pTableScanOp->info;
destroyTableScanOperatorInfo(pTableScanInfo, numOfOutput);
taosMemoryFreeClear(pStreamScan->pTableScanOp);
}
#endif
if (pStreamScan->tqReader) {
tqCloseReader(pStreamScan->tqReader);
}
@ -1528,6 +1526,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->pDeleteDataRes = createSpecialDataBlock(STREAM_DELETE_DATA);
pInfo->updateWin = (STimeWindow){.skey = INT64_MAX, .ekey = INT64_MAX};
pInfo->pUpdateDataRes = createSpecialDataBlock(STREAM_CLEAR);
pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
pOperator->name = "StreamScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
@ -2855,7 +2854,8 @@ int32_t stopGroupTableMergeScan(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}
SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock* pResBlock, int32_t capacity, SOperatorInfo* pOperator) {
SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock* pResBlock, int32_t capacity,
SOperatorInfo* pOperator) {
STableMergeScanInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@ -2874,7 +2874,6 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock*
}
}
qDebug("%s get sorted row blocks, rows:%d", GET_TASKID(pTaskInfo), pResBlock->info.rows);
return (pResBlock->info.rows > 0) ? pResBlock : NULL;
}
@ -2905,7 +2904,8 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
}
SSDataBlock* pBlock = NULL;
while (pInfo->tableStartIndex < tableListSize) {
pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity, pOperator);
pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity,
pOperator);
if (pBlock != NULL) {
pBlock->info.groupId = pInfo->groupId;
pOperator->resultInfo.totalRows += pBlock->info.rows;

View File

@ -773,9 +773,9 @@ int32_t binarySearch(void* keyList, int num, TSKEY key, int order, __get_value_f
}
int32_t comparePullWinKey(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
SArray* res = (SArray*)data;
SPullWindowInfo* pos = taosArrayGet(res, index);
SPullWindowInfo* pData = (SPullWindowInfo*) pKey;
SPullWindowInfo* pData = (SPullWindowInfo*)pKey;
if (pData->window.skey == pos->window.skey) {
if (pData->groupId > pos->groupId) {
return 1;
@ -810,7 +810,7 @@ static int32_t savePullWindow(SPullWindowInfo* pPullInfo, SArray* pPullWins) {
int32_t compareResKey(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
SResKeyPos* pos = taosArrayGetP(res, index);
SWinRes* pData = (SWinRes*) pKey;
SWinRes* pData = (SWinRes*)pKey;
if (pData->ts == *(int64_t*)pos->key) {
if (pData->groupId > pos->groupId) {
return 1;
@ -880,7 +880,7 @@ int64_t getWinReskey(void* data, int32_t index) {
int32_t compareWinRes(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
SWinRes* pos = taosArrayGetP(res, index);
SResKeyPos* pData = (SResKeyPos*) pKey;
SResKeyPos* pData = (SResKeyPos*)pKey;
if (*(int64_t*)pData->key == pos->ts) {
if (pData->groupId > pos->groupId) {
return 1;
@ -1417,15 +1417,15 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
pGpDatas = (uint64_t*)pGpCol->pData;
}
int32_t step = 0;
int32_t startPos = 0;
int32_t step = 0;
int32_t startPos = 0;
for (int32_t i = 0; i < pBlock->info.rows; i++) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTsCols[i], pInterval, TSDB_ORDER_ASC);
while (win.ekey <= endTsCols[i]) {
uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId;
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
if (pUpWins && res) {
SWinRes winRes = {.ts = win.skey, .groupId = winGpId};
taosArrayPush(pUpWins, &winRes);
@ -1596,7 +1596,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
}
if (pBlock->info.type == STREAM_NORMAL) {
//set input version
// set input version
pTaskInfo->version = pBlock->info.version;
}
@ -1644,7 +1644,7 @@ static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput) {
}
static void freeItem(void* param) {
SGroupKeys *pKey = (SGroupKeys*) param;
SGroupKeys* pKey = (SGroupKeys*)param;
taosMemoryFree(pKey->pData);
}
@ -2347,10 +2347,10 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfExprs, (SNodeListNode*)pInterpPhyNode->pFillValues);
pInfo->pRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
pInfo->win = pInterpPhyNode->timeRange;
pInfo->pRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
pInfo->win = pInterpPhyNode->timeRange;
pInfo->interval.interval = pInterpPhyNode->interval;
pInfo->current = pInfo->win.skey;
pInfo->current = pInfo->win.skey;
pOperator->name = "TimeSliceOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC;
@ -2970,8 +2970,8 @@ SSDataBlock* createSpecialDataBlock(EStreamType type) {
pBlock->info.groupId = 0;
pBlock->info.rows = 0;
pBlock->info.type = type;
pBlock->info.rowSize = sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) +
sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
pBlock->info.rowSize =
sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
pBlock->pDataBlock = taosArrayInit(6, sizeof(SColumnInfoData));
SColumnInfoData infoData = {0};
@ -3147,6 +3147,8 @@ void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) {
blockDataDestroy(pInfo->pDelRes);
blockDataDestroy(pInfo->pWinBlock);
blockDataDestroy(pInfo->pUpdateRes);
destroySqlFunctionCtx(pInfo->pDummyCtx, 0);
taosHashCleanup(pInfo->pStDeleted);
taosMemoryFreeClear(param);
}
@ -4323,10 +4325,10 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
} else {
return;
}
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
blockDataEnsureCapacity(pAggSup->pScanBlock, pSDataBlock->info.rows);
SColumnInfoData* pKeyColInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->stateCol.slotId);
SColumnInfoData* pKeyColInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->stateCol.slotId);
for (int32_t i = 0; i < pSDataBlock->info.rows; i += winRows) {
if (pInfo->ignoreExpiredData && isOverdue(tsCols[i], &pInfo->twAggSup)) {
i++;
@ -4341,7 +4343,7 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
&allEqual, pInfo->pSeDeleted);
if (!allEqual) {
appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey,
&pSDataBlock->info.groupId);
&pSDataBlock->info.groupId);
taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
deleteWindow(pAggSup->pCurWins, winIndex);
continue;


@ -25,6 +25,7 @@ extern "C" {
typedef int32_t (*FTranslateFunc)(SFunctionNode* pFunc, char* pErrBuf, int32_t len);
typedef EFuncDataRequired (*FFuncDataRequired)(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
typedef int32_t (*FCreateMergeFuncParameters)(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters);
typedef EFuncDataRequired (*FFuncDynDataRequired)(void* pRes, STimeWindow* pTimeWindow);
typedef struct SBuiltinFuncDefinition {
const char* name;
@ -32,6 +33,7 @@ typedef struct SBuiltinFuncDefinition {
uint64_t classification;
FTranslateFunc translateFunc;
FFuncDataRequired dataRequiredFunc;
FFuncDynDataRequired dynDataRequiredFunc;
FExecGetEnv getEnvFunc;
FExecInit initFunc;
FExecProcess processFunc;

View File

@ -466,7 +466,7 @@ int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0;
pResInfo->isNullRes = (pResInfo->isNullRes == 1) ? 1 : (pResInfo->numOfRes == 0);
char* in = GET_ROWCELL_INTERBUF(pResInfo);
colDataAppend(pCol, pBlock->info.rows, in, pResInfo->isNullRes);
@ -663,7 +663,8 @@ int32_t sumFunction(SqlFunctionCtx* pCtx) {
// check for overflow
if (IS_FLOAT_TYPE(type) && (isinf(pSumRes->dsum) || isnan(pSumRes->dsum))) {
numOfElem = 0;
GET_RES_INFO(pCtx)->isNullRes = 1;
numOfElem = 1;
}
_sum_over:
@ -1605,7 +1606,7 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t currentRow = pBlock->info.rows;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
pEntryInfo->isNullRes = (pEntryInfo->numOfRes == 0);
pEntryInfo->isNullRes = (pEntryInfo->isNullRes == 1) ? 1 : (pEntryInfo->numOfRes == 0);
if (pCol->info.type == TSDB_DATA_TYPE_FLOAT) {
float v = *(double*)&pRes->v;


@ -103,6 +103,13 @@ EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWin
return funcMgtBuiltins[pFunc->funcId].dataRequiredFunc(pFunc, pTimeWindow);
}
EFuncDataRequired fmFuncDynDataRequired(int32_t funcId, void* pRes, STimeWindow* pTimeWindow) {
if (fmIsUserDefinedFunc(funcId) || funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return TSDB_CODE_FAILED;
}
return funcMgtBuiltins[funcId].dynDataRequiredFunc(pRes, pTimeWindow);
}
int32_t fmGetFuncExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet) {
if (fmIsUserDefinedFunc(funcId) || funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return TSDB_CODE_FAILED;


@ -401,7 +401,8 @@ static int32_t logicVnodeModifCopy(const SVnodeModifyLogicNode* pSrc, SVnodeModi
COPY_SCALAR_FIELD(tableId);
COPY_SCALAR_FIELD(stableId);
COPY_SCALAR_FIELD(tableType);
COPY_CHAR_ARRAY_FIELD(tableFName);
COPY_CHAR_ARRAY_FIELD(tableName);
COPY_CHAR_ARRAY_FIELD(tsColName);
COPY_OBJECT_FIELD(deleteTimeRange, sizeof(STimeWindow));
CLONE_OBJECT_FIELD(pVgroupList, vgroupsInfoClone);
CLONE_NODE_LIST_FIELD(pInsertCols);


@ -1712,6 +1712,7 @@ static int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) {
}
static const char* jkJoinPhysiPlanJoinType = "JoinType";
static const char* jkJoinPhysiPlanInputTsOrder = "InputTsOrder";
static const char* jkJoinPhysiPlanMergeCondition = "MergeCondition";
static const char* jkJoinPhysiPlanOnConditions = "OnConditions";
static const char* jkJoinPhysiPlanTargets = "Targets";
@ -1723,6 +1724,9 @@ static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanJoinType, pNode->joinType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanInputTsOrder, pNode->inputTsOrder);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinPhysiPlanMergeCondition, nodeToJson, pNode->pMergeCondition);
}
@ -1742,7 +1746,9 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToPhysicPlanNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkJoinPhysiPlanJoinType, pNode->joinType, code);
;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkJoinPhysiPlanInputTsOrder, pNode->inputTsOrder, code);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinPhysiPlanOnConditions, &pNode->pOnConditions);
@ -2326,7 +2332,7 @@ static int32_t physiQueryInsertNodeToJson(const void* pObj, SJson* pJson) {
code = tjsonAddIntegerToObject(pJson, jkQueryInsertPhysiPlanTableType, pNode->tableType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkQueryInsertPhysiPlanTableFName, pNode->tableFName);
code = tjsonAddStringToObject(pJson, jkQueryInsertPhysiPlanTableFName, pNode->tableName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkQueryInsertPhysiPlanVgId, pNode->vgId);
@ -2355,7 +2361,7 @@ static int32_t jsonToPhysiQueryInsertNode(const SJson* pJson, void* pObj) {
code = tjsonGetTinyIntValue(pJson, jkQueryInsertPhysiPlanTableType, &pNode->tableType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkQueryInsertPhysiPlanTableFName, pNode->tableFName);
code = tjsonGetStringValue(pJson, jkQueryInsertPhysiPlanTableFName, pNode->tableName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetIntValue(pJson, jkQueryInsertPhysiPlanVgId, &pNode->vgId);
@ -2370,6 +2376,7 @@ static int32_t jsonToPhysiQueryInsertNode(const SJson* pJson, void* pObj) {
static const char* jkDeletePhysiPlanTableId = "TableId";
static const char* jkDeletePhysiPlanTableType = "TableType";
static const char* jkDeletePhysiPlanTableFName = "TableFName";
static const char* jkDeletePhysiPlanTsColName = "TsColName";
static const char* jkDeletePhysiPlanDeleteTimeRangeStartKey = "DeleteTimeRangeStartKey";
static const char* jkDeletePhysiPlanDeleteTimeRangeEndKey = "DeleteTimeRangeEndKey";
static const char* jkDeletePhysiPlanAffectedRows = "AffectedRows";
@ -2387,6 +2394,9 @@ static int32_t physiDeleteNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkDeletePhysiPlanTableFName, pNode->tableFName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkDeletePhysiPlanTsColName, pNode->tsColName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDeletePhysiPlanDeleteTimeRangeStartKey, pNode->deleteTimeRange.skey);
}
@ -2413,6 +2423,9 @@ static int32_t jsonToPhysiDeleteNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkDeletePhysiPlanTableFName, pNode->tableFName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkDeletePhysiPlanTsColName, pNode->tsColName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBigIntValue(pJson, jkDeletePhysiPlanDeleteTimeRangeStartKey, &pNode->deleteTimeRange.skey);
}
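For orientation, the delete physical plan now carries the timestamp column name (TsColName) alongside the delete time range it already serialized. A hypothetical statement that would produce such a plan, with database, table, and column names chosen purely as examples:
DELETE FROM demo_db.demo_tb WHERE ts < '2022-07-01 00:00:00';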


@ -176,7 +176,6 @@ db_options(A) ::= db_options(B) CACHESIZE NK_INTEGER(C).
db_options(A) ::= db_options(B) COMP NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_COMP, &C); }
db_options(A) ::= db_options(B) DURATION NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_DAYS, &C); }
db_options(A) ::= db_options(B) DURATION NK_VARIABLE(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_DAYS, &C); }
db_options(A) ::= db_options(B) FSYNC NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_FSYNC, &C); }
db_options(A) ::= db_options(B) MAXROWS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_MAXROWS, &C); }
db_options(A) ::= db_options(B) MINROWS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_MINROWS, &C); }
db_options(A) ::= db_options(B) KEEP integer_list(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_KEEP, C); }
@ -186,11 +185,12 @@ db_options(A) ::= db_options(B) PAGESIZE NK_INTEGER(C).
db_options(A) ::= db_options(B) PRECISION NK_STRING(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_PRECISION, &C); }
db_options(A) ::= db_options(B) REPLICA NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_REPLICA, &C); }
db_options(A) ::= db_options(B) STRICT NK_STRING(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_STRICT, &C); }
db_options(A) ::= db_options(B) WAL NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_WAL, &C); }
db_options(A) ::= db_options(B) VGROUPS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_VGROUPS, &C); }
db_options(A) ::= db_options(B) SINGLE_STABLE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_SINGLE_STABLE, &C); }
db_options(A) ::= db_options(B) RETENTIONS retention_list(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_RETENTIONS, C); }
db_options(A) ::= db_options(B) SCHEMALESS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_SCHEMALESS, &C); }
db_options(A) ::= db_options(B) WAL_LEVEL NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_WAL, &C); }
db_options(A) ::= db_options(B) WAL_FSYNC_PERIOD NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_FSYNC, &C); }
db_options(A) ::= db_options(B) WAL_RETENTION_PERIOD NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_WAL_RETENTION_PERIOD, &C); }
db_options(A) ::= db_options(B) WAL_RETENTION_PERIOD NK_MINUS(D) NK_INTEGER(C). {
SToken t = D;
@ -214,13 +214,13 @@ alter_db_options(A) ::= alter_db_options(B) alter_db_option(C).
//alter_db_option(A) ::= BUFFER NK_INTEGER(B). { A.type = DB_OPTION_BUFFER; A.val = B; }
alter_db_option(A) ::= CACHEMODEL NK_STRING(B). { A.type = DB_OPTION_CACHEMODEL; A.val = B; }
alter_db_option(A) ::= CACHESIZE NK_INTEGER(B). { A.type = DB_OPTION_CACHESIZE; A.val = B; }
alter_db_option(A) ::= FSYNC NK_INTEGER(B). { A.type = DB_OPTION_FSYNC; A.val = B; }
alter_db_option(A) ::= WAL_FSYNC_PERIOD NK_INTEGER(B). { A.type = DB_OPTION_FSYNC; A.val = B; }
alter_db_option(A) ::= KEEP integer_list(B). { A.type = DB_OPTION_KEEP; A.pList = B; }
alter_db_option(A) ::= KEEP variable_list(B). { A.type = DB_OPTION_KEEP; A.pList = B; }
//alter_db_option(A) ::= PAGES NK_INTEGER(B). { A.type = DB_OPTION_PAGES; A.val = B; }
//alter_db_option(A) ::= REPLICA NK_INTEGER(B). { A.type = DB_OPTION_REPLICA; A.val = B; }
//alter_db_option(A) ::= STRICT NK_STRING(B). { A.type = DB_OPTION_STRICT; A.val = B; }
alter_db_option(A) ::= WAL NK_INTEGER(B). { A.type = DB_OPTION_WAL; A.val = B; }
alter_db_option(A) ::= WAL_LEVEL NK_INTEGER(B). { A.type = DB_OPTION_WAL; A.val = B; }
%type integer_list { SNodeList* }
%destructor integer_list { nodesDestroyList($$); }
@ -476,7 +476,7 @@ explain_options(A) ::= explain_options(B) VERBOSE NK_BOOL(C).
explain_options(A) ::= explain_options(B) RATIO NK_FLOAT(C). { A = setExplainRatio(pCxt, B, &C); }
/************************************************ compact *************************************************************/
cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP. { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
//cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP. { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
/************************************************ create/drop function ************************************************/
cmd ::= CREATE agg_func_opt(A) FUNCTION not_exists_opt(F) function_name(B)
@ -525,7 +525,7 @@ dnode_list(A) ::= DNODE NK_INTEGER(B).
dnode_list(A) ::= dnode_list(B) DNODE NK_INTEGER(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &C)); }
/************************************************ syncdb **************************************************************/
cmd ::= SYNCDB db_name(A) REPLICA. { pCxt->pRootNode = createSyncdbStmt(pCxt, &A); }
//cmd ::= SYNCDB db_name(A) REPLICA. { pCxt->pRootNode = createSyncdbStmt(pCxt, &A); }
/************************************************ syncdb **************************************************************/
cmd ::= DELETE FROM full_table_name(A) where_clause_opt(B). { pCxt->pRootNode = createDeleteStmt(pCxt, A, B); }
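The grammar changes above replace the FSYNC and WAL database options with WAL_FSYNC_PERIOD and WAL_LEVEL while keeping the same option values. A brief sketch of statements the updated rules accept (the database name is hypothetical; the values match the ranges exercised by the parser tests later in this diff):
CREATE DATABASE demo_db WAL_LEVEL 2 WAL_FSYNC_PERIOD 3000;
ALTER DATABASE demo_db WAL_FSYNC_PERIOD 1000;
ALTER DATABASE demo_db WAL_LEVEL 1;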


@ -29,291 +29,224 @@ typedef struct SKeyword {
// clang-format off
// keywords in sql string
static SKeyword keywordTable[] = {
{"ACCOUNT", TK_ACCOUNT},
{"ACCOUNTS", TK_ACCOUNTS},
{"ADD", TK_ADD},
{"AGGREGATE", TK_AGGREGATE},
{"ALL", TK_ALL},
{"ALTER", TK_ALTER},
{"ANALYZE", TK_ANALYZE},
{"AND", TK_AND},
{"APPS", TK_APPS},
{"AS", TK_AS},
{"ASC", TK_ASC},
{"AT_ONCE", TK_AT_ONCE},
{"BALANCE", TK_BALANCE},
{"BETWEEN", TK_BETWEEN},
{"BINARY", TK_BINARY},
{"BIGINT", TK_BIGINT},
{"BNODE", TK_BNODE},
{"BNODES", TK_BNODES},
{"BOOL", TK_BOOL},
{"BUFFER", TK_BUFFER},
{"BUFSIZE", TK_BUFSIZE},
{"BY", TK_BY},
{"CACHE", TK_CACHE},
{"CACHEMODEL", TK_CACHEMODEL},
{"CACHESIZE", TK_CACHESIZE},
{"CAST", TK_CAST},
{"CLIENT_VERSION", TK_CLIENT_VERSION},
{"CLUSTER", TK_CLUSTER},
{"COLUMN", TK_COLUMN},
{"COMMENT", TK_COMMENT},
{"COMP", TK_COMP},
{"COMPACT", TK_COMPACT},
{"CONNS", TK_CONNS},
{"CONNECTION", TK_CONNECTION},
{"CONNECTIONS", TK_CONNECTIONS},
{"CONSUMER", TK_CONSUMER},
{"CONSUMERS", TK_CONSUMERS},
{"CONTAINS", TK_CONTAINS},
{"COUNT", TK_COUNT},
{"CREATE", TK_CREATE},
{"CURRENT_USER", TK_CURRENT_USER},
{"DATABASE", TK_DATABASE},
{"DATABASES", TK_DATABASES},
{"DBS", TK_DBS},
{"DELETE", TK_DELETE},
{"DESC", TK_DESC},
{"DESCRIBE", TK_DESCRIBE},
{"DISTINCT", TK_DISTINCT},
{"DISTRIBUTED", TK_DISTRIBUTED},
{"DNODE", TK_DNODE},
{"DNODES", TK_DNODES},
{"DOUBLE", TK_DOUBLE},
{"DROP", TK_DROP},
{"DURATION", TK_DURATION},
{"ENABLE", TK_ENABLE},
{"EXISTS", TK_EXISTS},
{"EXPIRED", TK_EXPIRED},
{"EXPLAIN", TK_EXPLAIN},
{"EVERY", TK_EVERY},
{"FILE", TK_FILE},
{"FILL", TK_FILL},
{"FIRST", TK_FIRST},
{"FLOAT", TK_FLOAT},
{"FLUSH", TK_FLUSH},
{"FROM", TK_FROM},
{"FSYNC", TK_FSYNC},
{"FUNCTION", TK_FUNCTION},
{"FUNCTIONS", TK_FUNCTIONS},
{"GRANT", TK_GRANT},
{"GRANTS", TK_GRANTS},
{"GROUP", TK_GROUP},
{"HAVING", TK_HAVING},
{"IF", TK_IF},
{"IGNORE", TK_IGNORE},
{"IMPORT", TK_IMPORT},
{"IN", TK_IN},
{"INDEX", TK_INDEX},
{"INDEXES", TK_INDEXES},
{"INNER", TK_INNER},
{"INT", TK_INT},
{"INSERT", TK_INSERT},
{"INTEGER", TK_INTEGER},
{"INTERVAL", TK_INTERVAL},
{"INTO", TK_INTO},
{"IS", TK_IS},
{"JOIN", TK_JOIN},
{"JSON", TK_JSON},
{"KEEP", TK_KEEP},
{"KILL", TK_KILL},
{"LAST", TK_LAST},
{"LAST_ROW", TK_LAST_ROW},
{"LICENCE", TK_LICENCE},
{"LIKE", TK_LIKE},
{"LIMIT", TK_LIMIT},
{"LINEAR", TK_LINEAR},
{"LOCAL", TK_LOCAL},
{"MATCH", TK_MATCH},
{"MAXROWS", TK_MAXROWS},
{"MAX_DELAY", TK_MAX_DELAY},
{"MERGE", TK_MERGE},
{"META", TK_META},
{"MINROWS", TK_MINROWS},
{"MINUS", TK_MINUS},
{"MNODE", TK_MNODE},
{"MNODES", TK_MNODES},
{"MODIFY", TK_MODIFY},
{"MODULES", TK_MODULES},
{"NCHAR", TK_NCHAR},
{"NEXT", TK_NEXT},
{"NMATCH", TK_NMATCH},
{"NONE", TK_NONE},
{"NOT", TK_NOT},
{"NOW", TK_NOW},
{"NULL", TK_NULL},
{"NULLS", TK_NULLS},
{"OFFSET", TK_OFFSET},
{"ON", TK_ON},
{"OR", TK_OR},
{"ORDER", TK_ORDER},
{"OUTPUTTYPE", TK_OUTPUTTYPE},
{"PARTITION", TK_PARTITION},
{"PASS", TK_PASS},
{"PAGES", TK_PAGES},
{"PAGESIZE", TK_PAGESIZE},
{"PORT", TK_PORT},
{"PPS", TK_PPS},
{"PRECISION", TK_PRECISION},
// {"PRIVILEGE", TK_PRIVILEGE},
{"PREV", TK_PREV},
{"QNODE", TK_QNODE},
{"QNODES", TK_QNODES},
{"QTIME", TK_QTIME},
{"QUERIES", TK_QUERIES},
{"QUERY", TK_QUERY},
{"RANGE", TK_RANGE},
{"RATIO", TK_RATIO},
{"READ", TK_READ},
{"REDISTRIBUTE", TK_REDISTRIBUTE},
{"RENAME", TK_RENAME},
{"REPLICA", TK_REPLICA},
{"RESET", TK_RESET},
{"RETENTIONS", TK_RETENTIONS},
{"REVOKE", TK_REVOKE},
{"ROLLUP", TK_ROLLUP},
{"SCHEMALESS", TK_SCHEMALESS},
{"SCORES", TK_SCORES},
{"SELECT", TK_SELECT},
{"SERVER_STATUS", TK_SERVER_STATUS},
{"SERVER_VERSION", TK_SERVER_VERSION},
{"SESSION", TK_SESSION},
{"SET", TK_SET},
{"SHOW", TK_SHOW},
{"SINGLE_STABLE", TK_SINGLE_STABLE},
{"SLIDING", TK_SLIDING},
{"SLIMIT", TK_SLIMIT},
{"SMA", TK_SMA},
{"SMALLINT", TK_SMALLINT},
{"SNODE", TK_SNODE},
{"SNODES", TK_SNODES},
{"SOFFSET", TK_SOFFSET},
{"SPLIT", TK_SPLIT},
{"STABLE", TK_STABLE},
{"STABLES", TK_STABLES},
{"STATE", TK_STATE},
{"STATE_WINDOW", TK_STATE_WINDOW},
{"STORAGE", TK_STORAGE},
{"STREAM", TK_STREAM},
{"STREAMS", TK_STREAMS},
{"STRICT", TK_STRICT},
{"SUBSCRIPTIONS", TK_SUBSCRIPTIONS},
{"SYNCDB", TK_SYNCDB},
{"SYSINFO", TK_SYSINFO},
{"TABLE", TK_TABLE},
{"TABLES", TK_TABLES},
{"TAG", TK_TAG},
{"TAGS", TK_TAGS},
{"TBNAME", TK_TBNAME},
{"TIMESTAMP", TK_TIMESTAMP},
{"TIMEZONE", TK_TIMEZONE},
{"TINYINT", TK_TINYINT},
{"TO", TK_TO},
{"TODAY", TK_TODAY},
{"TOPIC", TK_TOPIC},
{"TOPICS", TK_TOPICS},
{"TRANSACTION", TK_TRANSACTION},
{"TRANSACTIONS", TK_TRANSACTIONS},
{"TRIGGER", TK_TRIGGER},
{"TRIM", TK_TRIM},
{"TSERIES", TK_TSERIES},
{"TTL", TK_TTL},
{"UNION", TK_UNION},
{"UNSIGNED", TK_UNSIGNED},
{"USE", TK_USE},
{"USER", TK_USER},
{"USERS", TK_USERS},
{"USING", TK_USING},
{"VALUE", TK_VALUE},
{"VALUES", TK_VALUES},
{"VARCHAR", TK_VARCHAR},
{"VARIABLES", TK_VARIABLES},
{"VERBOSE", TK_VERBOSE},
{"VGROUP", TK_VGROUP},
{"VGROUPS", TK_VGROUPS},
{"VNODES", TK_VNODES},
{"WAL", TK_WAL},
{"ACCOUNT", TK_ACCOUNT},
{"ACCOUNTS", TK_ACCOUNTS},
{"ADD", TK_ADD},
{"AGGREGATE", TK_AGGREGATE},
{"ALL", TK_ALL},
{"ALTER", TK_ALTER},
{"ANALYZE", TK_ANALYZE},
{"AND", TK_AND},
{"APPS", TK_APPS},
{"AS", TK_AS},
{"ASC", TK_ASC},
{"AT_ONCE", TK_AT_ONCE},
{"BALANCE", TK_BALANCE},
{"BETWEEN", TK_BETWEEN},
{"BINARY", TK_BINARY},
{"BIGINT", TK_BIGINT},
{"BNODE", TK_BNODE},
{"BNODES", TK_BNODES},
{"BOOL", TK_BOOL},
{"BUFFER", TK_BUFFER},
{"BUFSIZE", TK_BUFSIZE},
{"BY", TK_BY},
{"CACHE", TK_CACHE},
{"CACHEMODEL", TK_CACHEMODEL},
{"CACHESIZE", TK_CACHESIZE},
{"CAST", TK_CAST},
{"CLIENT_VERSION", TK_CLIENT_VERSION},
{"CLUSTER", TK_CLUSTER},
{"COLUMN", TK_COLUMN},
{"COMMENT", TK_COMMENT},
{"COMP", TK_COMP},
{"CONNS", TK_CONNS},
{"CONNECTION", TK_CONNECTION},
{"CONNECTIONS", TK_CONNECTIONS},
{"CONSUMER", TK_CONSUMER},
{"CONSUMERS", TK_CONSUMERS},
{"CONTAINS", TK_CONTAINS},
{"COUNT", TK_COUNT},
{"CREATE", TK_CREATE},
{"CURRENT_USER", TK_CURRENT_USER},
{"DATABASE", TK_DATABASE},
{"DATABASES", TK_DATABASES},
{"DBS", TK_DBS},
{"DELETE", TK_DELETE},
{"DESC", TK_DESC},
{"DESCRIBE", TK_DESCRIBE},
{"DISTINCT", TK_DISTINCT},
{"DISTRIBUTED", TK_DISTRIBUTED},
{"DNODE", TK_DNODE},
{"DNODES", TK_DNODES},
{"DOUBLE", TK_DOUBLE},
{"DROP", TK_DROP},
{"DURATION", TK_DURATION},
{"ENABLE", TK_ENABLE},
{"EXISTS", TK_EXISTS},
{"EXPIRED", TK_EXPIRED},
{"EXPLAIN", TK_EXPLAIN},
{"EVERY", TK_EVERY},
{"FILE", TK_FILE},
{"FILL", TK_FILL},
{"FIRST", TK_FIRST},
{"FLOAT", TK_FLOAT},
{"FLUSH", TK_FLUSH},
{"FROM", TK_FROM},
{"FUNCTION", TK_FUNCTION},
{"FUNCTIONS", TK_FUNCTIONS},
{"GRANT", TK_GRANT},
{"GRANTS", TK_GRANTS},
{"GROUP", TK_GROUP},
{"HAVING", TK_HAVING},
{"IF", TK_IF},
{"IGNORE", TK_IGNORE},
{"IMPORT", TK_IMPORT},
{"IN", TK_IN},
{"INDEX", TK_INDEX},
{"INDEXES", TK_INDEXES},
{"INNER", TK_INNER},
{"INT", TK_INT},
{"INSERT", TK_INSERT},
{"INTEGER", TK_INTEGER},
{"INTERVAL", TK_INTERVAL},
{"INTO", TK_INTO},
{"IS", TK_IS},
{"JOIN", TK_JOIN},
{"JSON", TK_JSON},
{"KEEP", TK_KEEP},
{"KILL", TK_KILL},
{"LAST", TK_LAST},
{"LAST_ROW", TK_LAST_ROW},
{"LICENCE", TK_LICENCE},
{"LIKE", TK_LIKE},
{"LIMIT", TK_LIMIT},
{"LINEAR", TK_LINEAR},
{"LOCAL", TK_LOCAL},
{"MATCH", TK_MATCH},
{"MAXROWS", TK_MAXROWS},
{"MAX_DELAY", TK_MAX_DELAY},
{"MERGE", TK_MERGE},
{"META", TK_META},
{"MINROWS", TK_MINROWS},
{"MINUS", TK_MINUS},
{"MNODE", TK_MNODE},
{"MNODES", TK_MNODES},
{"MODIFY", TK_MODIFY},
{"MODULES", TK_MODULES},
{"NCHAR", TK_NCHAR},
{"NEXT", TK_NEXT},
{"NMATCH", TK_NMATCH},
{"NONE", TK_NONE},
{"NOT", TK_NOT},
{"NOW", TK_NOW},
{"NULL", TK_NULL},
{"NULLS", TK_NULLS},
{"OFFSET", TK_OFFSET},
{"ON", TK_ON},
{"OR", TK_OR},
{"ORDER", TK_ORDER},
{"OUTPUTTYPE", TK_OUTPUTTYPE},
{"PARTITION", TK_PARTITION},
{"PASS", TK_PASS},
{"PAGES", TK_PAGES},
{"PAGESIZE", TK_PAGESIZE},
{"PORT", TK_PORT},
{"PPS", TK_PPS},
{"PRECISION", TK_PRECISION},
{"PREV", TK_PREV},
{"QNODE", TK_QNODE},
{"QNODES", TK_QNODES},
{"QTIME", TK_QTIME},
{"QUERIES", TK_QUERIES},
{"QUERY", TK_QUERY},
{"RANGE", TK_RANGE},
{"RATIO", TK_RATIO},
{"READ", TK_READ},
{"REDISTRIBUTE", TK_REDISTRIBUTE},
{"RENAME", TK_RENAME},
{"REPLICA", TK_REPLICA},
{"RESET", TK_RESET},
{"RETENTIONS", TK_RETENTIONS},
{"REVOKE", TK_REVOKE},
{"ROLLUP", TK_ROLLUP},
{"SCHEMALESS", TK_SCHEMALESS},
{"SCORES", TK_SCORES},
{"SELECT", TK_SELECT},
{"SERVER_STATUS", TK_SERVER_STATUS},
{"SERVER_VERSION", TK_SERVER_VERSION},
{"SESSION", TK_SESSION},
{"SET", TK_SET},
{"SHOW", TK_SHOW},
{"SINGLE_STABLE", TK_SINGLE_STABLE},
{"SLIDING", TK_SLIDING},
{"SLIMIT", TK_SLIMIT},
{"SMA", TK_SMA},
{"SMALLINT", TK_SMALLINT},
{"SNODE", TK_SNODE},
{"SNODES", TK_SNODES},
{"SOFFSET", TK_SOFFSET},
{"SPLIT", TK_SPLIT},
{"STABLE", TK_STABLE},
{"STABLES", TK_STABLES},
{"STATE", TK_STATE},
{"STATE_WINDOW", TK_STATE_WINDOW},
{"STORAGE", TK_STORAGE},
{"STREAM", TK_STREAM},
{"STREAMS", TK_STREAMS},
{"STRICT", TK_STRICT},
{"SUBSCRIPTIONS", TK_SUBSCRIPTIONS},
{"SYSINFO", TK_SYSINFO},
{"TABLE", TK_TABLE},
{"TABLES", TK_TABLES},
{"TAG", TK_TAG},
{"TAGS", TK_TAGS},
{"TBNAME", TK_TBNAME},
{"TIMESTAMP", TK_TIMESTAMP},
{"TIMEZONE", TK_TIMEZONE},
{"TINYINT", TK_TINYINT},
{"TO", TK_TO},
{"TODAY", TK_TODAY},
{"TOPIC", TK_TOPIC},
{"TOPICS", TK_TOPICS},
{"TRANSACTION", TK_TRANSACTION},
{"TRANSACTIONS", TK_TRANSACTIONS},
{"TRIGGER", TK_TRIGGER},
{"TRIM", TK_TRIM},
{"TSERIES", TK_TSERIES},
{"TTL", TK_TTL},
{"UNION", TK_UNION},
{"UNSIGNED", TK_UNSIGNED},
{"USE", TK_USE},
{"USER", TK_USER},
{"USERS", TK_USERS},
{"USING", TK_USING},
{"VALUE", TK_VALUE},
{"VALUES", TK_VALUES},
{"VARCHAR", TK_VARCHAR},
{"VARIABLES", TK_VARIABLES},
{"VERBOSE", TK_VERBOSE},
{"VGROUP", TK_VGROUP},
{"VGROUPS", TK_VGROUPS},
{"WAL_FSYNC_PERIOD", TK_WAL_FSYNC_PERIOD},
{"WAL_LEVEL", TK_WAL_LEVEL},
{"WAL_RETENTION_PERIOD", TK_WAL_RETENTION_PERIOD},
{"WAL_RETENTION_SIZE", TK_WAL_RETENTION_SIZE},
{"WAL_ROLL_PERIOD", TK_WAL_ROLL_PERIOD},
{"WAL_SEGMENT_SIZE", TK_WAL_SEGMENT_SIZE},
{"WATERMARK", TK_WATERMARK},
{"WHERE", TK_WHERE},
{"WINDOW_CLOSE", TK_WINDOW_CLOSE},
{"WITH", TK_WITH},
{"WRITE", TK_WRITE},
{"_C0", TK_ROWTS},
{"_QDURATION", TK_QDURATION},
{"_QEND", TK_QEND},
{"_QSTART", TK_QSTART},
{"_ROWTS", TK_ROWTS},
{"_WDURATION", TK_WDURATION},
{"_WEND", TK_WEND},
{"_WSTART", TK_WSTART},
// {"ID", TK_ID},
// {"STRING", TK_STRING},
// {"EQ", TK_EQ},
// {"NE", TK_NE},
// {"ISNULL", TK_ISNULL},
// {"NOTNULL", TK_NOTNULL},
// {"GLOB", TK_GLOB},
// {"GT", TK_GT},
// {"GE", TK_GE},
// {"LT", TK_LT},
// {"LE", TK_LE},
// {"BITAND", TK_BITAND},
// {"BITOR", TK_BITOR},
// {"LSHIFT", TK_LSHIFT},
// {"RSHIFT", TK_RSHIFT},
// {"PLUS", TK_PLUS},
// {"DIVIDE", TK_DIVIDE},
// {"TIMES", TK_TIMES},
// {"STAR", TK_STAR},
// {"SLASH", TK_SLASH},
// {"REM ", TK_REM},
// {"||", TK_CONCAT},
// {"UMINUS", TK_UMINUS},
// {"UPLUS", TK_UPLUS},
// {"BITNOT", TK_BITNOT},
// {"DOT", TK_DOT},
// {"CTIME", TK_CTIME},
// {"LP", TK_LP},
// {"RP", TK_RP},
// {"COMMA", TK_COMMA},
// {"VARIABLE", TK_VARIABLE},
// {"UPDATE", TK_UPDATE},
// {"CHANGE", TK_CHANGE},
// {"COLON", TK_COLON},
// {"ABORT", TK_ABORT},
// {"AFTER", TK_AFTER},
// {"ATTACH", TK_ATTACH},
// {"BEFORE", TK_BEFORE},
// {"BEGIN", TK_BEGIN},
// {"CASCADE", TK_CASCADE},
// {"CONFLICT", TK_CONFLICT},
// {"COPY", TK_COPY},
// {"DEFERRED", TK_DEFERRED},
// {"DELIMITERS", TK_DELIMITERS},
// {"DETACH", TK_DETACH},
// {"EACH", TK_EACH},
// {"END", TK_END},
// {"FAIL", TK_FAIL},
// {"FOR", TK_FOR},
// {"IMMEDIATE", TK_IMMEDIATE},
// {"INITIALLY", TK_INITIALLY},
// {"INSTEAD", TK_INSTEAD},
// {"KEY", TK_KEY},
// {"OF", TK_OF},
// {"RAISE", TK_RAISE},
// {"REPLACE", TK_REPLACE},
// {"RESTRICT", TK_RESTRICT},
// {"ROW", TK_ROW},
// {"STATEMENT", TK_STATEMENT},
// {"VIEW", TK_VIEW},
// {"SEMI", TK_SEMI},
// {"PARTITIONS", TK_PARTITIONS},
// {"MODE", TK_MODE},
{"WATERMARK", TK_WATERMARK},
{"WHERE", TK_WHERE},
{"WINDOW_CLOSE", TK_WINDOW_CLOSE},
{"WITH", TK_WITH},
{"WRITE", TK_WRITE},
{"_C0", TK_ROWTS},
{"_QDURATION", TK_QDURATION},
{"_QEND", TK_QEND},
{"_QSTART", TK_QSTART},
{"_ROWTS", TK_ROWTS},
{"_WDURATION", TK_WDURATION},
{"_WEND", TK_WEND},
{"_WSTART", TK_WSTART},
};
// clang-format on


@ -39,6 +39,7 @@ typedef struct STranslateContext {
SCmdMsgInfo* pCmdMsg;
SHashObj* pDbs;
SHashObj* pTables;
SHashObj* pTargetTables;
SExplainOptions* pExplainOpt;
SParseMetaCache* pMetaCache;
bool createStream;
@ -89,10 +90,10 @@ static int32_t collectUseDatabase(const SName* pName, SHashObj* pDbs) {
return collectUseDatabaseImpl(dbFName, pDbs);
}
static int32_t collectUseTable(const SName* pName, SHashObj* pDbs) {
static int32_t collectUseTable(const SName* pName, SHashObj* pTable) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pName, fullName);
return taosHashPut(pDbs, fullName, strlen(fullName), pName, sizeof(SName));
return taosHashPut(pTable, fullName, strlen(fullName), pName, sizeof(SName));
}
static int32_t getTableMetaImpl(STranslateContext* pCxt, const SName* pName, STableMeta** pMeta) {
@ -357,7 +358,8 @@ static int32_t initTranslateContext(SParseContext* pParseCxt, SParseMetaCache* p
pCxt->pMetaCache = pMetaCache;
pCxt->pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
pCxt->pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pCxt->pNsLevel || NULL == pCxt->pDbs || NULL == pCxt->pTables) {
pCxt->pTargetTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pCxt->pNsLevel || NULL == pCxt->pDbs || NULL == pCxt->pTables || NULL == pCxt->pTargetTables) {
return TSDB_CODE_OUT_OF_MEMORY;
}
return TSDB_CODE_SUCCESS;
@ -3188,7 +3190,8 @@ static int32_t checkOptionsDependency(STranslateContext* pCxt, const char* pDbNa
daysToKeep0 = (-1 == daysToKeep0 ? dbCfg.daysToKeep0 : daysToKeep0);
}
if (daysPerFile > daysToKeep0) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DAYS_VALUE);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION,
"Invalid duration value, should be keep2 >= keep1 >= keep0 >= duration");
}
return TSDB_CODE_SUCCESS;
}
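The reworded error above describes the dependency that checkOptionsDependency enforces between DURATION and the KEEP list. As a hedged illustration with hypothetical names and values (plain integers are interpreted as days), the first statement below would be rejected because DURATION (10) exceeds keep0 (7), while the second satisfies keep2 >= keep1 >= keep0 >= duration:
CREATE DATABASE demo_db DURATION 10 KEEP 7,30,365;
CREATE DATABASE demo_db DURATION 10 KEEP 10,30,365;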
@ -3933,6 +3936,9 @@ static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm
SName tableName;
tNameExtractFullName(toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tableName), pReq->name);
int32_t code = collectUseTable(&tableName, pCxt->pTables);
if (TSDB_CODE_SUCCESS == code) {
code = collectUseTable(&tableName, pCxt->pTargetTables);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildRollupAst(pCxt, pStmt, pReq);
}
@ -3953,11 +3959,14 @@ static int32_t translateCreateSuperTable(STranslateContext* pCxt, SCreateTableSt
}
static int32_t doTranslateDropSuperTable(STranslateContext* pCxt, const SName* pTableName, bool ignoreNotExists) {
SMDropStbReq dropReq = {0};
tNameExtractFullName(pTableName, dropReq.name);
dropReq.igNotExists = ignoreNotExists;
return buildCmdMsg(pCxt, TDMT_MND_DROP_STB, (FSerializeFunc)tSerializeSMDropStbReq, &dropReq);
int32_t code = collectUseTable(pTableName, pCxt->pTargetTables);
if (TSDB_CODE_SUCCESS == code) {
SMDropStbReq dropReq = {0};
tNameExtractFullName(pTableName, dropReq.name);
dropReq.igNotExists = ignoreNotExists;
code = buildCmdMsg(pCxt, TDMT_MND_DROP_STB, (FSerializeFunc)tSerializeSMDropStbReq, &dropReq);
}
return code;
}
static int32_t translateDropTable(STranslateContext* pCxt, SDropTableStmt* pStmt) {
@ -5559,8 +5568,13 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
int32_t code = checkCreateTable(pCxt, pStmt, false);
SVgroupInfo info = {0};
SName name;
toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &name);
if (TSDB_CODE_SUCCESS == code) {
code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &info);
code = getTableHashVgroupImpl(pCxt, &name, &info);
}
if (TSDB_CODE_SUCCESS == code) {
code = collectUseTable(&name, pCxt->pTargetTables);
}
SArray* pBufArray = NULL;
if (TSDB_CODE_SUCCESS == code) {
@ -5829,6 +5843,11 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla
if (TSDB_CODE_SUCCESS == code) {
code = getTableMeta(pCxt, pStmt->useDbName, pStmt->useTableName, &pSuperTableMeta);
}
if (TSDB_CODE_SUCCESS == code) {
SName name;
toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &name);
code = collectUseTable(&name, pCxt->pTargetTables);
}
STag* pTag = NULL;
SArray* tagName = taosArrayInit(8, TSDB_COL_NAME_LEN);
@ -5927,8 +5946,13 @@ static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* p
static int32_t buildDropTableVgroupHashmap(STranslateContext* pCxt, SDropTableClause* pClause, bool* pIsSuperTable,
SHashObj* pVgroupHashmap) {
SName name;
toName(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, &name);
STableMeta* pTableMeta = NULL;
int32_t code = getTableMeta(pCxt, pClause->dbName, pClause->tableName, &pTableMeta);
int32_t code = getTableMetaImpl(pCxt, &name, &pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = collectUseTable(&name, pCxt->pTargetTables);
}
if (TSDB_CODE_SUCCESS == code && TSDB_SUPER_TABLE == pTableMeta->tableType) {
*pIsSuperTable = true;
@ -6509,6 +6533,20 @@ static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery) {
pTable = taosHashIterate(pCxt->pTables, pTable);
}
}
if (NULL != pCxt->pTargetTables) {
taosArrayDestroy(pQuery->pTargetTableList);
pQuery->pTargetTableList = taosArrayInit(taosHashGetSize(pCxt->pTargetTables), sizeof(SName));
if (NULL == pQuery->pTargetTableList) {
return TSDB_CODE_OUT_OF_MEMORY;
}
SName* pTable = taosHashIterate(pCxt->pTargetTables, NULL);
while (NULL != pTable) {
taosArrayPush(pQuery->pTargetTableList, pTable);
pTable = taosHashIterate(pCxt->pTargetTables, pTable);
}
}
return TSDB_CODE_SUCCESS;
}


@ -99,8 +99,6 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "Query block has incorrect number of result columns";
case TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL:
return "Incorrect TIMESTAMP value: %s";
case TSDB_CODE_PAR_INVALID_DAYS_VALUE:
return "Invalid days value, should be keep2 >= keep1 >= keep0 >= days";
case TSDB_CODE_PAR_OFFSET_LESS_ZERO:
return "soffset/offset can not be less than 0";
case TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY:

File diff suppressed because it is too large.


@ -82,12 +82,12 @@ TEST_F(ParserInitialATest, alterDnode) {
* BUFFER int_value -- todo: range [3, 16384], default 96, unit MB
* | CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} -- default 'none'
* | CACHESIZE int_value -- range [1, 65536], default 1, unit MB
* | FSYNC int_value -- range [0, 180000], default 3000, unit ms
* | WAL_FSYNC_PERIOD int_value -- range [0, 180000], default 3000, unit ms
* | KEEP {int_value | duration_value} -- range [1, 365000], default 3650, unit day
* | PAGES int_value -- todo: range [64, +oo), default 256, unit page
* | REPLICA int_value -- todo: enum 1, 3, default 1, unit replica
* | STRICT {'off' | 'on'} -- todo: default 'off'
* | WAL int_value -- enum 1, 2, default 1
* | WAL_LEVEL int_value -- enum 1, 2, default 1
* }
*/
TEST_F(ParserInitialATest, alterDatabase) {
@ -157,7 +157,7 @@ TEST_F(ParserInitialATest, alterDatabase) {
setAlterDbFsync(200);
setAlterDbWal(1);
setAlterDbCacheModel(TSDB_CACHE_MODEL_LAST_ROW);
run("ALTER DATABASE test CACHEMODEL 'last_row' CACHESIZE 32 FSYNC 200 KEEP 10 WAL 1");
run("ALTER DATABASE test CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 WAL_LEVEL 1");
clearAlterDbReq();
initAlterDb("test");
@ -182,11 +182,11 @@ TEST_F(ParserInitialATest, alterDatabase) {
initAlterDb("test");
setAlterDbFsync(0);
run("ALTER DATABASE test FSYNC 0");
run("ALTER DATABASE test WAL_FSYNC_PERIOD 0");
setAlterDbFsync(1000);
run("ALTER DATABASE test FSYNC 1000");
run("ALTER DATABASE test WAL_FSYNC_PERIOD 1000");
setAlterDbFsync(180000);
run("ALTER DATABASE test FSYNC 180000");
run("ALTER DATABASE test WAL_FSYNC_PERIOD 180000");
clearAlterDbReq();
initAlterDb("test");
@ -210,9 +210,9 @@ TEST_F(ParserInitialATest, alterDatabase) {
initAlterDb("test");
setAlterDbWal(1);
run("ALTER DATABASE test WAL 1");
run("ALTER DATABASE test WAL_LEVEL 1");
setAlterDbWal(2);
run("ALTER DATABASE test WAL 2");
run("ALTER DATABASE test WAL_LEVEL 2");
clearAlterDbReq();
}
@ -223,16 +223,16 @@ TEST_F(ParserInitialATest, alterDatabaseSemanticCheck) {
run("ALTER DATABASE test CACHESIZE 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test CACHESIZE 65537", TSDB_CODE_PAR_INVALID_DB_OPTION);
// The syntax limits it to only positive numbers
run("ALTER DATABASE test FSYNC -1", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
run("ALTER DATABASE test FSYNC 180001", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL_FSYNC_PERIOD -1", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
run("ALTER DATABASE test WAL_FSYNC_PERIOD 180001", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 365001", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 1000000000s", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 1w", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL 3", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL_LEVEL 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL_LEVEL 3", TSDB_CODE_PAR_INVALID_DB_OPTION);
// Regardless of the specific sentence
run("ALTER DATABASE db WAL 0 # td-14436", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
run("ALTER DATABASE db WAL_LEVEL 0 # td-14436", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
}
/*

Some files were not shown because too many files have changed in this diff.