Merge branch '3.0' of https://github.com/taosdata/TDengine into fix/hzcheng_3.0

commit cace6bfca7

@@ -0,0 +1,13 @@
# taosadapter
ExternalProject_Add(taosadapter
    GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
    GIT_TAG df8678f
    SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
    BINARY_DIR ""
    #BUILD_IN_SOURCE TRUE
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    INSTALL_COMMAND ""
    TEST_COMMAND ""
)

@@ -0,0 +1,13 @@
# taos-tools
ExternalProject_Add(taos-tools
    GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
    GIT_TAG 817cb6a
    SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
    BINARY_DIR ""
    #BUILD_IN_SOURCE TRUE
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    INSTALL_COMMAND ""
    TEST_COMMAND ""
)

@@ -0,0 +1,13 @@
# taosws-rs
ExternalProject_Add(taosws-rs
    GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git
    GIT_TAG 9de599d
    SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
    BINARY_DIR ""
    #BUILD_IN_SOURCE TRUE
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    INSTALL_COMMAND ""
    TEST_COMMAND ""
)
@@ -9,6 +9,24 @@ endfunction(cat IN_FILE OUT_FILE)
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})

# taos-tools
if(${BUILD_TOOLS})
    cat("${TD_SUPPORT_DIR}/taostools_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

# taosws-rs
if(${WEBSOCKET})
    cat("${TD_SUPPORT_DIR}/taosws_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

# taosadapter
if(${BUILD_HTTP})
    MESSAGE("BUILD_HTTP is on")
else ()
    MESSAGE("BUILD_HTTP is off, use taosAdapter")
    cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

# pthread
if(${BUILD_PTHREAD})
    cat("${TD_SUPPORT_DIR}/pthread_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -8,10 +8,10 @@ description: "Create and drop databases; view and modify database parameters"

```sql
CREATE DATABASE [IF NOT EXISTS] db_name [database_options]

database_options:
    database_option ...

database_option: {
    BUFFER value
  | CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
@@ -39,41 +39,42 @@ database_option: {
```

### Parameter Descriptions

- buffer: the size of the write memory pool of a VNODE, in MB. Default 96, minimum 3, maximum 16384.
- CACHEMODEL: whether the most recent data of subtables is cached in memory. Default none.
  - none: do not cache.
  - last_row: cache the most recent row of each subtable. This significantly improves the performance of the LAST_ROW function.
  - last_value: cache the most recent non-NULL value of each column of each subtable. This significantly improves the performance of the LAST function when no special condition (WHERE, ORDER BY, GROUP BY, INTERVAL) is involved.
  - both: cache both the most recent row and the most recent column values.
- CACHESIZE: the amount of memory used to cache the most recent data of subtables. Default 1, range [1, 65536], unit MB.
- COMP: the database file compression flag. Default 2, range [0, 2].
  - 0: no compression.
  - 1: one-stage compression.
  - 2: two-stage compression.
- DURATION: the time span of the data stored in one data file. A unit suffix may be used, e.g. DURATION 100h or DURATION 10d; the supported units are m (minutes), h (hours), and d (days). Without a unit the default is days, so DURATION 50 means 50 days.
- FSYNC: when the WAL parameter is set to 2, the period at which the WAL is flushed to disk. Default 3000, in milliseconds. The minimum is 0, meaning every write is flushed immediately; the maximum is 180000, i.e. three minutes.
- MAXROWS: the maximum number of records in a file block. Default 4096.
- MINROWS: the minimum number of records in a file block. Default 100.
- KEEP: the number of days data files are kept. Default 3650, range [1, 365000]; it must be greater than or equal to the DURATION value. The database automatically deletes data that has been kept longer than KEEP. KEEP also accepts a unit suffix, e.g. KEEP 100h or KEEP 10d, with units m (minutes), h (hours), and d (days); it can also be written without a unit, e.g. KEEP 50, in which case the default unit is days.
- PAGES: the number of cache pages of the metadata storage engine in a VNODE. Default 256, minimum 64. The metadata storage of a VNODE occupies PAGESIZE * PAGES, which is 1 MB of memory by default.
- PAGESIZE: the page size of the metadata storage engine in a VNODE, in KB. Default 4 KB. Range 1 to 16384, i.e. 1 KB to 16 MB.
- PRECISION: the timestamp precision of the database: ms for milliseconds, us for microseconds, ns for nanoseconds. Default ms.
- REPLICA: the number of database replicas, either 1 or 3. Default 1. When used in a cluster, the number of replicas must be less than or equal to the number of DNODEs.
- RETENTIONS: the aggregation periods and retention durations of the data. For example, RETENTIONS 15s:7d,1m:21d,15m:50d means the raw collection period is 15 seconds and raw data is kept for 7 days, data aggregated every 1 minute is kept for 21 days, and data aggregated every 15 minutes is kept for 50 days. Exactly three retention levels are currently supported.
- STRICT: the consistency requirement for data replication. Default off.
  - on: strong consistency, i.e. the standard raft protocol is run and a write returns success once a majority has committed.
  - off: weak consistency, a write returns success once the local commit is done.
- WAL: the WAL level. Default 1.
  - 1: write the WAL but do not execute fsync.
  - 2: write the WAL and execute fsync.
- VGROUPS: the initial number of vgroups in the database.
- SINGLE_STABLE: whether only a single supertable may be created in this database, intended for supertables with a very large number of columns.
  - 0: multiple supertables may be created.
  - 1: only one supertable may be created.
- WAL_RETENTION_PERIOD: an extra retention policy for WAL files, used for data subscription; the retention duration of the WAL, in seconds. Default 0, meaning WAL files are deleted as soon as the data is flushed to disk; -1 means they are never deleted.
- WAL_RETENTION_SIZE: an extra retention policy for WAL files, used for data subscription; the maximum amount of WAL kept, in KB. Default 0, meaning WAL files are deleted as soon as the data is flushed to disk; -1 means they are never deleted.
- WAL_ROLL_PERIOD: the period at which WAL files are switched, in seconds. After a WAL file has been created and written to for this long, a new WAL file is created automatically. Default 0, meaning a new file is created only when data is flushed to disk.
- WAL_SEGMENT_SIZE: the size of a single WAL file, in KB. When the currently written file exceeds this limit, a new WAL file is created automatically. Default 0, meaning a new file is created only when data is flushed to disk.
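
Several of these options can be combined in a single statement. The following is a minimal illustrative sketch; the database name and the option values are arbitrary and not taken from the original document:

```sql
-- Keep data for 365 days, split data files every 10 days,
-- cache the latest row of each subtable, and start with 4 vgroups.
CREATE DATABASE IF NOT EXISTS power
  KEEP 365
  DURATION 10
  CACHEMODEL 'last_row'
  VGROUPS 4;
```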

### Create Database Example

@@ -104,10 +105,10 @@ DROP DATABASE [IF EXISTS] db_name

```sql
ALTER DATABASE db_name [alter_database_options]

alter_database_options:
    alter_database_option ...

alter_database_option: {
    CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
  | CACHESIZE value

@@ -118,7 +119,7 @@ alter_database_option: {
```

:::note
Other parameters cannot be modified in version 3.0.0.0 for now.

:::
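
As a usage illustration (the database name is an assumed placeholder, not from the original document), switching an existing database to caching only the latest row of each subtable uses one of the options listed above:

```sql
ALTER DATABASE power CACHEMODEL 'last_row';
```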
@@ -139,3 +140,17 @@ SHOW CREATE DATABASE db_name;
Commonly used for database migration. For an existing database, this returns its creation statement; executing that statement in another cluster produces a database with exactly the same settings.

### View Database Parameters

```sql
SHOW DATABASES \G;
```

Lists the configuration parameters of every database in the system, showing one parameter per line.

## Delete Expired Data

```sql
TRIM DATABASE db_name;
```

Deletes expired data and reorganizes the data according to the multi-tier storage configuration.
@@ -139,6 +139,7 @@ ALTER TABLE tb_name RENAME COLUMN old_col_name new_col_name

## Modify a Subtable

```sql
ALTER TABLE [db_name.]tb_name alter_table_clause

alter_table_clause: {

@@ -153,6 +154,7 @@ alter_table_option: {
    TTL value
  | COMMENT 'string_value'
}
```

**Usage Notes**
1. Except for changing tag values, modifications to the columns and tags of a subtable must be made through its supertable. A tag-value change can be issued on the subtable directly, as sketched below.
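
A minimal sketch of changing a tag value directly on a subtable (the table and tag names are assumptions for illustration, not from the original document):

```sql
ALTER TABLE d1001 SET TAG location = 'California.SanFrancisco';
```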
@@ -192,5 +194,5 @@ SHOW CREATE TABLE tb_name;
### Get Table Structure Information

```
DESCRIBE [db_name.]tb_name;
```
@@ -46,7 +46,7 @@ SHOW CREATE STABLE stb_name;
### Get the Structure Information of a Supertable

```
DESCRIBE [db_name.]stb_name;
```

## Drop a Supertable

@@ -218,7 +218,7 @@ Expressions in the GROUP BY clause can contain any column of a table or view; these

The PARTITION BY clause is a TDengine-specific extension that splits the data according to part_list and performs the computation within each partition, as illustrated below.
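
For instance, a per-partition aggregate might look like the following sketch (the `meters` supertable and its `location` tag and `current` column are assumed example names, not taken from this document):

```sql
SELECT location, AVG(current) FROM meters PARTITION BY location;
```
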
See [TDengine distinguished queries](./distinguished) for details.

## ORDER BY

@@ -13,7 +13,7 @@ toc_max_heading_level: 4
#### ABS

```sql
SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the absolute value of the specified column.

@@ -31,7 +31,7 @@ toc_max_heading_level: 4
#### ACOS

```sql
SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the arc cosine of the specified column.

@@ -49,7 +49,7 @@ toc_max_heading_level: 4
#### ASIN

```sql
SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the arc sine of the specified column.

@@ -68,7 +68,7 @@ toc_max_heading_level: 4
#### ATAN

```sql
SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the arc tangent of the specified column.

@@ -86,7 +86,7 @@ toc_max_heading_level: 4

#### CEIL

```sql
SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -108,7 +108,7 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### COS

```sql
SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the cosine of the specified column.

@@ -125,7 +125,7 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];

#### FLOOR

```sql
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -135,7 +135,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### LOG

```sql
SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the logarithm of the specified column to the base `base`.

@@ -154,7 +154,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### POW

```sql
SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the specified column raised to the power `power`.

@@ -172,7 +172,7 @@ SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];

#### ROUND

```sql
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -183,7 +183,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### SIN

```sql
SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the sine of the specified column.

@@ -201,7 +201,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### SQRT

```sql
SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the square root of the specified column.

@@ -219,7 +219,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### TAN

```sql
SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the tangent of the specified column.

@@ -240,8 +240,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

#### CHAR_LENGTH

```sql
SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: The length of the string, counted in characters.

@@ -257,7 +257,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### CONCAT

```sql
SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: String concatenation function.

@@ -273,8 +273,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

#### CONCAT_WS

```sql
SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: String concatenation with a separator.

@@ -290,8 +290,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

#### LENGTH

```sql
SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: The length of the string, counted in bytes.

@@ -307,8 +307,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

#### LOWER

```sql
SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Converts the string argument to all lowercase letters.

@@ -324,8 +324,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

#### LTRIM

```sql
SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the string with leading spaces removed.

@@ -341,8 +341,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

#### RTRIM

```sql
SELECT RTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns the string with trailing spaces removed.

@@ -358,8 +358,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

#### SUBSTR

```sql
SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns a substring of length len taken from the source string str starting at position pos.

@@ -375,8 +375,8 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];

#### UPPER

```sql
SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Converts the string argument to all uppercase letters.

@@ -397,7 +397,7 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### CAST

```sql
SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Type conversion function. The input expression can be a normal column, a constant, a scalar function, or arithmetic combinations of them; it can only be used in the SELECT clause.
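
A short usage sketch of CAST (the column and table names are assumptions for illustration):

```sql
-- Interpret a VARCHAR column as a BIGINT value in the result set.
SELECT CAST(str_col AS BIGINT) FROM tb1;
```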
@@ -587,7 +587,7 @@ TDengine supports aggregate queries over data. The following aggregate functions are provided.

### AVG

```sql
SELECT AVG(field_name) FROM tb_name [WHERE clause];
```

@@ -602,7 +602,7 @@ SELECT AVG(field_name) FROM tb_name [WHERE clause];

### COUNT

```sql
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
```

@@ -623,7 +623,7 @@ SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];

### ELAPSED

```sql
SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
```

@@ -649,7 +649,7 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE

### LEASTSQUARES

```sql
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
```

@@ -664,7 +664,7 @@ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]

### MODE

```sql
SELECT MODE(field_name) FROM tb_name [WHERE clause];
```

@@ -679,7 +679,7 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];

### SPREAD

```sql
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -694,7 +694,7 @@ SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];

### STDDEV

```sql
SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
```

@@ -709,7 +709,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];

### SUM

```sql
SELECT SUM(field_name) FROM tb_name [WHERE clause];
```

@@ -724,7 +724,7 @@ SELECT SUM(field_name) FROM tb_name [WHERE clause];

### HYPERLOGLOG

```sql
SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -741,7 +741,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];

### HISTOGRAM

```sql
SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
```

@@ -775,7 +775,7 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam

### APERCENTILE

```sql
SELECT APERCENTILE(field_name, P[, algo_type])
FROM { tb_name | stb_name } [WHERE clause]
```

@@ -790,7 +790,7 @@ FROM { tb_name | stb_name } [WHERE clause]

### BOTTOM

```sql
SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -810,7 +810,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];

### FIRST

```sql
SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -830,7 +830,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];

### INTERP

```sql
SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ RANGE(timestamp1,timestamp2) ] [EVERY(interval)] [FILL ({ VALUE | PREV | NULL | LINEAR | NEXT})];
```

@@ -854,7 +854,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [

### LAST

```sql
SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -875,7 +875,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];

### LAST_ROW

```sql
SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
```

@@ -894,7 +894,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };

### MAX

```sql
SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -909,7 +909,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];

### MIN

```sql
SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
```

@@ -924,7 +924,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];

### PERCENTILE

```sql
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```

@@ -941,7 +941,7 @@ SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];

### TAIL

```sql
SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
```

@@ -958,7 +958,7 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];

### TOP

```sql
SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -978,7 +978,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];

### UNIQUE

```sql
SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
```

@@ -998,7 +998,7 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
### CSUM

```sql
SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Cumulative sum; the number of output rows equals the number of input rows.

@@ -1020,7 +1020,7 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];

### DERIVATIVE

```sql
SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
```

@@ -1037,9 +1037,9 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER

### DIFF

```sql
SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause];
```

**Description**: Returns the difference between the value of a column and the corresponding value in the previous row. ignore_negative can be 0 or 1 and may be omitted; the default is 0, meaning negative values are not ignored. When ignore_negative is 1, negative values are ignored. A short usage sketch follows.
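
For example, turning a monotonically increasing counter into per-row increments might look like this sketch (the table and column names are assumptions):

```sql
-- Per-row increase of a cumulative counter column, ignoring negative jumps.
SELECT DIFF(total_requests, 1) FROM counter_tb;
```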
@@ -1054,7 +1054,7 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER

### IRATE

```sql
SELECT IRATE(field_name) FROM tb_name WHERE clause;
```

@@ -1069,7 +1069,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
### MAVG

```sql
SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Computes the moving average of k consecutive values. If the number of input rows is less than k, no result is produced. The valid range of k is 1 ≤ k ≤ 1000.

@@ -1091,7 +1091,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
### SAMPLE

```sql
SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns k sampled values of the data. The valid range of k is 1 ≤ k ≤ 1000.

@@ -1111,7 +1111,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;

### STATECOUNT

```sql
SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause];
```

@@ -1166,7 +1166,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W

### TWA

```sql
SELECT TWA(field_name) FROM tb_name WHERE clause;
```

@@ -1185,7 +1185,7 @@ SELECT TWA(field_name) FROM tb_name WHERE clause;

### DATABASE

```sql
SELECT DATABASE();
```

@@ -1194,7 +1194,7 @@ SELECT DATABASE();

### CLIENT_VERSION

```sql
SELECT CLIENT_VERSION();
```

@@ -1202,7 +1202,7 @@ SELECT CLIENT_VERSION();

### SERVER_VERSION

```sql
SELECT SERVER_VERSION();
```

@@ -1210,7 +1210,7 @@ SELECT SERVER_VERSION();

### SERVER_STATUS

```sql
SELECT SERVER_STATUS();
```

@@ -0,0 +1,66 @@
---
sidebar_label: Message Queue
title: Message Queue
---

Starting with TDengine 3.0.0.0, the message queue has been substantially optimized and enhanced to simplify user solutions.

## Create a Topic

```sql
CREATE TOPIC [IF NOT EXISTS] topic_name AS {subquery | DATABASE db_name | STABLE stb_name };
```

There are three kinds of topics: column subscription, supertable subscription, and database subscription.

**A column subscription** is described by a subquery. It supports filtering, scalar functions, and UDF scalar functions, but does not support JOIN, GROUP BY, window clauses, aggregate functions, or UDF aggregate functions. Column subscriptions follow these rules:

1. Once a TOPIC is created, the set of fields in its result is fixed.
2. Columns that are subscribed to or used in the computation cannot be dropped or modified.
3. New columns may be added, but they will not appear in the subscription result fields.
4. For select \*, the subscription expands to all columns at creation time (data columns for subtables and normal tables; data columns plus tag columns for supertables). See the sketch after this list.

**Supertable subscriptions and database subscriptions** follow these rules:

1. Schema changes of the subscribed object are not restricted.
2. The schema in returned messages is block-level, and the schema of each block may differ.
3. Data written after a column change that has not yet been flushed to disk is returned with the schema in effect at write time.
4. Data written after a column change that has already been flushed to disk is returned with the schema in effect at flush time.

## Drop a Topic

```sql
DROP TOPIC [IF EXISTS] topic_name;
```

If there are consumers on this topic at that moment, those consumers will receive an error.

## View Topics

## SHOW TOPICS

```sql
SHOW TOPICS;
```

Shows information about all topics in the current database.

## Create a Consumer Group

Consumer groups can only be created through the APIs provided by the TDengine client driver or the connectors.

## Drop a Consumer Group

```sql
DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
```

Drops the consumer group cgroup_name on the topic topic_name.

## View Consumer Groups

```sql
SHOW CONSUMERS;
```

Shows information about all active consumers in the current database.
@@ -9,8 +9,8 @@ title: Naming and Limits
2. Names may start with a letter or an underscore, but not with a digit.
3. Names are case-insensitive.
4. Rules for escaped table (column) names:
   To support more forms of table (column) names, TDengine introduces the escape character "`". It lets a table name avoid conflicts with keywords and exempts it from the legality checks described above.
   Escaped table (column) names are still subject to the length limits, and the escape characters are not counted toward the length. After escaping, the content inside the escape characters is no longer normalized to one case.

For example: \`aBc\` and \`abc\` are different table (column) names, whereas abc and aBc are the same table (column) name.
Note that the content inside the escape characters must consist of printable characters.

@@ -23,28 +23,30 @@ title: Naming and Limits

## General Limits

- The maximum length of a database name is 32.
- The maximum length of a table name is 192, excluding the database-name prefix and the separator.
- The maximum length of a data row is 48 KB (note: each BINARY/NCHAR column in a row additionally occupies 2 bytes of storage).
- The maximum length of a column name is 64.
- At most 4096 columns are allowed, and at least 2 are required; the first column must be a timestamp.
- The maximum length of a tag name is 64.
- At most 128 tags are allowed and at least 1 is required; the total length of the tag values of a table cannot exceed 16 KB.
- The maximum length of a SQL statement is 1048576 characters; it can be changed via the client configuration parameter maxSQLLength, within the range 65480 to 1048576.
- A SELECT statement may return at most 4096 columns (function calls in the statement may also occupy some of these); when the limit is exceeded, explicitly specify fewer result columns to avoid an execution error.
- The numbers of databases, supertables, and tables are not limited by the system, only by system resources.
- The number of database replicas can only be set to 1 or 3.
- The maximum length of a user name is 23 bytes.
- The maximum length of a user password is 15 bytes.
- The total number of data rows depends on the available resources.
- The maximum number of virtual nodes in a single database is 1024.

## Legality of Table (Column) Names

### Naming rules for table (column) names in TDengine:

A name may only consist of letters, digits, and underscores; it may not start with a digit; its length may not exceed 192 bytes; and it is case-insensitive. The table name here does not include the database-name prefix and the separator.

### Rules for escaped table (column) names:

To support more forms of table (column) names, TDengine introduces the escape character "`", which avoids conflicts between table names and keywords and exempts the name from the legality checks above; the escape characters are not counted toward the name length.
Escaped table (column) names are still subject to the length limits, and the escape characters are not counted toward the length. After escaping, the content inside the escape characters is no longer normalized to one case.
@@ -0,0 +1,155 @@
---
sidebar_label: Cluster Management
title: Cluster Management
---

The physical entities that make up a TDengine cluster are dnodes (short for data node), processes that run on top of the operating system. Inside a dnode, vnodes (virtual nodes) responsible for storing time-series data can be created. In a multi-node cluster, when the replica of a database is 3, each vgroup of that database consists of 3 vnodes; when the replica is 1, each vgroup consists of 1 vnode. To configure a database with multiple replicas, the cluster must contain at least 3 dnodes. A dnode can also host an mnode (management node); at most three mnodes can be created in a single cluster. To support the separation of compute and storage, TDengine 3.0.0.0 introduces a new kind of logical node, the qnode (query node); a qnode and vnodes can coexist in the same dnode or be fully separated onto different dnodes.

## Create a Data Node

```sql
CREATE DNODE {dnode_endpoint | dnode_host_name PORT port_val}
```

Here `dnode_endpoint` takes the form `hostname:port`. The hostname and port can also be specified separately.

In practice it is recommended to create the dnode first and then start the corresponding dnode process, so that the dnode can immediately join the cluster according to the firstEP in its configuration file. Each dnode is assigned an ID after it joins successfully.
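
Both forms of the statement might look like the following sketch (the host name and port are placeholder values, not from the original document):

```sql
CREATE DNODE "h2.taosdata.com:6030";
CREATE DNODE "h2.taosdata.com" PORT 6030;
```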

## View Data Nodes

```sql
SHOW DNODES;
```

Lists all data nodes in the cluster; the listed fields include the dnode ID, endpoint, and status.

## Drop a Data Node

```sql
DROP DNODE {dnode_id | dnode_endpoint}
```

A dnode can be removed from the cluster either by dnode_id or by endpoint. Note that dropping a dnode is not the same as stopping the corresponding process; in practice it is recommended to drop the dnode first and then stop its process.

## Modify Data Node Configuration

```sql
ALTER DNODE dnode_id dnode_option

ALTER ALL DNODES dnode_option

dnode_option: {
    'resetLog'
  | 'resetQueryCache'
  | 'balance' value
  | 'monitor' value
  | 'debugFlag' value
  | 'monDebugFlag' value
  | 'vDebugFlag' value
  | 'mDebugFlag' value
  | 'cDebugFlag' value
  | 'httpDebugFlag' value
  | 'qDebugflag' value
  | 'sdbDebugFlag' value
  | 'uDebugFlag' value
  | 'tsdbDebugFlag' value
  | 'sDebugflag' value
  | 'rpcDebugFlag' value
  | 'dDebugFlag' value
  | 'mqttDebugFlag' value
  | 'wDebugFlag' value
  | 'tmrDebugFlag' value
  | 'cqDebugFlag' value
}
```

The configurable items above are configured in the same way as in the dnode configuration file; the difference is that changes made this way take effect dynamically and immediately, without restarting the dnode.
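
For instance, resetting the query cache on every data node at once could look like this sketch:

```sql
ALTER ALL DNODES 'resetQueryCache';
```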

## Add a Management Node

```sql
CREATE MNODE ON DNODE dnode_id
```

By default the system creates one MNODE on the firstEP node at startup; this statement lets users create additional MNODEs to improve system availability. A cluster can contain at most three MNODEs, and at most one MNODE can be created on a DNODE.

## View Management Nodes

```sql
SHOW MNODES;
```

Lists all management nodes in the cluster, including their ID, the DNODE they reside on, and their status.

## Drop a Management Node

```sql
DROP MNODE ON DNODE dnode_id;
```

Drops the MNODE on the DNODE specified by dnode_id.

## Create a Query Node

```sql
CREATE QNODE ON DNODE dnode_id;
```

By default the system has no QNODE at startup; users can create QNODEs to separate computation from storage. At most one QNODE can be created on a DNODE. If the `supportVnodes` parameter of a DNODE is non-zero and a QNODE is also created on it, that dnode contains both vnodes responsible for storage management and a qnode responsible for query computation; if an mnode is also created on it, all three kinds of logical nodes can coexist on one dnode. They can also be completely separated through configuration: setting `supportVnodes` of a dnode to 0 and then creating either an mnode or a qnode on it achieves complete physical separation of the three kinds of logical nodes.

## View Query Nodes

```sql
SHOW QNODES;
```

Lists all query nodes in the cluster, including their ID and the DNODE they reside on.

## Drop a Query Node

```sql
DROP QNODE ON DNODE dnode_id;
```

Drops the QNODE on the DNODE with ID dnode_id; this does not affect the status of that dnode.

## Modify Client Configuration

If the client is also regarded as part of the cluster in a broad sense, its configuration parameters can be modified dynamically with the following command.

```sql
ALTER LOCAL local_option

local_option: {
    'resetLog'
  | 'rpcDebugFlag' value
  | 'tmrDebugFlag' value
  | 'cDebugFlag' value
  | 'uDebugFlag' value
  | 'debugFlag' value
}
```

The parameters above are used in the same way as in the client configuration file, but no client restart is needed; changes take effect immediately.

## View Client Configuration

```sql
SHOW LOCAL VARIABLES;
```

## Merge vgroups

```sql
MERGE VGROUP vgroup_no1 vgroup_no2;
```

If, after the system has been running for a while, data and load become unevenly distributed among vgroups because different time series have different characteristics, the load can be rebalanced step by step by merging or splitting vgroups.

## Split a vgroup

```sql
SPLIT VGROUP vgroup_no;
```

Creates a new vgroup and migrates part of the data of the specified vgroup to the new one according to consistent hashing. During this process the original vgroup continues to serve reads and writes normally.
@@ -3,22 +3,15 @@ sidebar_label: Metadata Database
title: Metadata Database
---

TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to database metadata and to system information and status, such as the names of databases and tables and the SQL statements currently being executed. This database stores information about every other database maintained by TDengine. It contains multiple read-only tables. These tables are in fact views rather than base tables, so there are no files associated with them; they can only be queried, and write operations such as INSERT are not allowed. The `INFORMATION_SCHEMA` database is intended to provide, in a more consistent way, access to the information exposed by the various SHOW statements supported by TDengine (such as SHOW TABLES and SHOW DATABASES). Compared with SHOW statements, using SELECT ... FROM INFORMATION_SCHEMA.tablename has the following advantages:

1. You can use a USE statement to make INFORMATION_SCHEMA the default database.
2. You can use the familiar syntax of SELECT statements and only need to learn some table and column names.
3. You can filter and sort the query results; in fact, any SELECT statement supported by TDengine can be used to query the tables in INFORMATION_SCHEMA.
4. TDengine can flexibly add columns to existing INFORMATION_SCHEMA tables in future releases without worrying about breaking existing business systems.
5. It is more interoperable with other database systems; for example, Oracle users are familiar with querying the tables of the Oracle data dictionary.
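
A small sketch of the SELECT-based style described above (the table name `ins_databases` and the filter value are assumptions for illustration and may differ between versions; any INFORMATION_SCHEMA table can be substituted):

```sql
USE INFORMATION_SCHEMA;
SELECT * FROM ins_databases WHERE name = 'power';
```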

Note: since SHOW statements are already familiar to developers and widely used, they remain available.

This chapter describes in detail the tables and table structures in the built-in metadata database `INFORMATION_SCHEMA`.

@@ -23,6 +23,23 @@ A password can be at most 128 bytes long; legal characters include "a-zA-Z0-9!?$%^&*()_–+={[}]:;@~
DROP USER user_name;
```

## Modify User Information

```sql
ALTER USER user_name alter_user_clause

alter_user_clause: {
    PASS 'literal'
  | ENABLE value
  | SYSINFO value
}
```

- PASS: change the user's password.
- ENABLE: enable or disable the user. 1 enables the user, 0 disables the user.
- SYSINFO: whether the user may view system information. 1 means the user can view system information, 0 means the user cannot.
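
For example, temporarily disabling an account might look like this sketch (the user name is an arbitrary placeholder):

```sql
ALTER USER test_user ENABLE 0;
```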


## Grant Permissions

```sql
@@ -0,0 +1,28 @@
---
sidebar_label: User-Defined Functions
title: User-Defined Functions
---

In addition to TDengine's built-in functions, users can write their own function logic and add it to the TDengine system.

## Create a Function

```sql
CREATE [AGGREGATE] FUNCTION func_name AS library_path OUTPUTTYPE type_name [BUFSIZE value]
```

Syntax notes:

AGGREGATE: indicates whether the function is a scalar function or an aggregate function.
func_name: the function name; it must match the actual name of udfNormalFunc in the function implementation.
library_path: the absolute path of the dynamic library containing the UDF implementation, on the client-side host.
OUTPUTTYPE: the return type of the function.
BUFSIZE: the buffer size for intermediate results, in bytes. Defaults to 0 if not set; it cannot exceed 512 bytes.
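
A hypothetical example of registering an aggregate UDF, following the grammar above (the function name, library path, and buffer size are illustrative assumptions, not from the original document):

```sql
CREATE AGGREGATE FUNCTION l2norm AS "/usr/local/taos/udf/libl2norm.so" OUTPUTTYPE DOUBLE BUFSIZE 64;
```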

For how to develop user-defined functions, see the [UDF usage guide](../develop/udf).

## Drop a User-Defined Function

```sql
DROP FUNCTION func_name
```
@@ -0,0 +1,47 @@
---
sidebar_label: Indexes
title: Using Indexes
---

Starting with version 3.0.0.0, TDengine introduces index support, covering SMA indexes and FULLTEXT indexes.

## Create an Index

```sql
CREATE FULLTEXT INDEX index_name ON tb_name (col_name [, col_name] ...)

CREATE SMA INDEX index_name ON tb_name index_option

index_option:
    FUNCTION(functions) INTERVAL(interval_val [, interval_offset]) [SLIDING(sliding_val)] [WATERMARK(watermark_val)] [MAX_DELAY(max_delay_val)]

functions:
    function [, function] ...
```

### SMA Indexes

Creates pre-aggregations over the specified columns for each time window defined by the INTERVAL clause; the type of pre-aggregation is given by functions_string. SMA indexes can improve the performance of aggregate queries over a given time range. Currently, only one SMA INDEX can be created per supertable.

- The supported functions are MAX, MIN, and SUM. See the sketch after this list for a combined example.
- WATERMARK: smallest unit is milliseconds, range [0ms, 900000ms], default 5 seconds; only usable on supertables.
- MAX_DELAY: smallest unit is milliseconds, range [1ms, 900000ms]; the default is the INTERVAL value (but no more than the maximum); only usable on supertables. Note: do not set MAX_DELAY too small, otherwise results are pushed too frequently and storage and query performance suffer; if there is no special requirement, keep the default.
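
A sketch of an SMA index combining these pieces (the index, supertable, and column names are assumptions for illustration, not from the original document):

```sql
CREATE SMA INDEX sma_idx_current ON meters
  FUNCTION(MAX(current), MIN(current), SUM(current)) INTERVAL(10m);
```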

### FULLTEXT Indexes

Creates a text index on the specified columns, which can improve the performance of queries that filter on text. FULLTEXT indexes do not support the index_option syntax. At present, FULLTEXT indexes can only be created on tag columns of JSON type. Multi-column composite indexes are not supported, but a FULLTEXT index can be created separately for each column.

## Drop an Index

```sql
DROP INDEX index_name;
```

## View Indexes

```sql
SHOW INDEXES FROM tbl_name [FROM db_name];
```

Shows the indexes that have been created on the specified database or table.
@@ -0,0 +1,38 @@
---
sidebar_label: Fault Recovery
title: Fault Recovery
---

In complex application scenarios, connections or query tasks may enter an error state or run for far too long without finishing, so there must be a way to terminate such connections or tasks. A typical flow is sketched below.
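
A minimal illustration of the flow described in the sections that follow: list the active connections, then terminate one of them (the connection ID shown is a placeholder):

```sql
SHOW CONNECTIONS;
KILL CONNECTION 123;
```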

## Terminate a Connection

```sql
KILL CONNECTION conn_id;
```

The conn_id can be obtained with `SHOW CONNECTIONS`.

## Terminate a Query

```sql
KILL QUERY query_id;
```

The query_id can be obtained with `SHOW QUERIES`.

## Terminate a Transaction

```sql
KILL TRANSACTION trans_id
```

The trans_id can be obtained with `SHOW TRANSACTIONS`.

## Reset the Client Cache

```sql
RESET QUERY CACHE;
```

If metadata becomes out of sync across multiple clients, this command can be used to force-clear the client cache, after which the client fetches the latest metadata from the server.
@ -84,20 +84,20 @@
|
|||
#define TK_COMP 66
|
||||
#define TK_DURATION 67
|
||||
#define TK_NK_VARIABLE 68
|
||||
#define TK_FSYNC 69
|
||||
#define TK_MAXROWS 70
|
||||
#define TK_MINROWS 71
|
||||
#define TK_KEEP 72
|
||||
#define TK_PAGES 73
|
||||
#define TK_PAGESIZE 74
|
||||
#define TK_PRECISION 75
|
||||
#define TK_REPLICA 76
|
||||
#define TK_STRICT 77
|
||||
#define TK_WAL 78
|
||||
#define TK_VGROUPS 79
|
||||
#define TK_SINGLE_STABLE 80
|
||||
#define TK_RETENTIONS 81
|
||||
#define TK_SCHEMALESS 82
|
||||
#define TK_MAXROWS 69
|
||||
#define TK_MINROWS 70
|
||||
#define TK_KEEP 71
|
||||
#define TK_PAGES 72
|
||||
#define TK_PAGESIZE 73
|
||||
#define TK_PRECISION 74
|
||||
#define TK_REPLICA 75
|
||||
#define TK_STRICT 76
|
||||
#define TK_VGROUPS 77
|
||||
#define TK_SINGLE_STABLE 78
|
||||
#define TK_RETENTIONS 79
|
||||
#define TK_SCHEMALESS 80
|
||||
#define TK_WAL_LEVEL 81
|
||||
#define TK_WAL_FSYNC_PERIOD 82
|
||||
#define TK_WAL_RETENTION_PERIOD 83
|
||||
#define TK_WAL_RETENTION_SIZE 84
|
||||
#define TK_WAL_ROLL_PERIOD 85
|
||||
|
@ -188,93 +188,90 @@
|
|||
#define TK_NK_BOOL 170
|
||||
#define TK_RATIO 171
|
||||
#define TK_NK_FLOAT 172
|
||||
#define TK_COMPACT 173
|
||||
#define TK_VNODES 174
|
||||
#define TK_IN 175
|
||||
#define TK_OUTPUTTYPE 176
|
||||
#define TK_AGGREGATE 177
|
||||
#define TK_BUFSIZE 178
|
||||
#define TK_STREAM 179
|
||||
#define TK_INTO 180
|
||||
#define TK_TRIGGER 181
|
||||
#define TK_AT_ONCE 182
|
||||
#define TK_WINDOW_CLOSE 183
|
||||
#define TK_IGNORE 184
|
||||
#define TK_EXPIRED 185
|
||||
#define TK_KILL 186
|
||||
#define TK_CONNECTION 187
|
||||
#define TK_TRANSACTION 188
|
||||
#define TK_BALANCE 189
|
||||
#define TK_VGROUP 190
|
||||
#define TK_MERGE 191
|
||||
#define TK_REDISTRIBUTE 192
|
||||
#define TK_SPLIT 193
|
||||
#define TK_SYNCDB 194
|
||||
#define TK_DELETE 195
|
||||
#define TK_INSERT 196
|
||||
#define TK_NULL 197
|
||||
#define TK_NK_QUESTION 198
|
||||
#define TK_NK_ARROW 199
|
||||
#define TK_ROWTS 200
|
||||
#define TK_TBNAME 201
|
||||
#define TK_QSTART 202
|
||||
#define TK_QEND 203
|
||||
#define TK_QDURATION 204
|
||||
#define TK_WSTART 205
|
||||
#define TK_WEND 206
|
||||
#define TK_WDURATION 207
|
||||
#define TK_CAST 208
|
||||
#define TK_NOW 209
|
||||
#define TK_TODAY 210
|
||||
#define TK_TIMEZONE 211
|
||||
#define TK_CLIENT_VERSION 212
|
||||
#define TK_SERVER_VERSION 213
|
||||
#define TK_SERVER_STATUS 214
|
||||
#define TK_CURRENT_USER 215
|
||||
#define TK_COUNT 216
|
||||
#define TK_LAST_ROW 217
|
||||
#define TK_BETWEEN 218
|
||||
#define TK_IS 219
|
||||
#define TK_NK_LT 220
|
||||
#define TK_NK_GT 221
|
||||
#define TK_NK_LE 222
|
||||
#define TK_NK_GE 223
|
||||
#define TK_NK_NE 224
|
||||
#define TK_MATCH 225
|
||||
#define TK_NMATCH 226
|
||||
#define TK_CONTAINS 227
|
||||
#define TK_JOIN 228
|
||||
#define TK_INNER 229
|
||||
#define TK_SELECT 230
|
||||
#define TK_DISTINCT 231
|
||||
#define TK_WHERE 232
|
||||
#define TK_PARTITION 233
|
||||
#define TK_BY 234
|
||||
#define TK_SESSION 235
|
||||
#define TK_STATE_WINDOW 236
|
||||
#define TK_SLIDING 237
|
||||
#define TK_FILL 238
|
||||
#define TK_VALUE 239
|
||||
#define TK_NONE 240
|
||||
#define TK_PREV 241
|
||||
#define TK_LINEAR 242
|
||||
#define TK_NEXT 243
|
||||
#define TK_HAVING 244
|
||||
#define TK_RANGE 245
|
||||
#define TK_EVERY 246
|
||||
#define TK_ORDER 247
|
||||
#define TK_SLIMIT 248
|
||||
#define TK_SOFFSET 249
|
||||
#define TK_LIMIT 250
|
||||
#define TK_OFFSET 251
|
||||
#define TK_ASC 252
|
||||
#define TK_NULLS 253
|
||||
#define TK_ID 254
|
||||
#define TK_NK_BITNOT 255
|
||||
#define TK_VALUES 256
|
||||
#define TK_IMPORT 257
|
||||
#define TK_NK_SEMI 258
|
||||
#define TK_FILE 259
|
||||
#define TK_OUTPUTTYPE 173
|
||||
#define TK_AGGREGATE 174
|
||||
#define TK_BUFSIZE 175
|
||||
#define TK_STREAM 176
|
||||
#define TK_INTO 177
|
||||
#define TK_TRIGGER 178
|
||||
#define TK_AT_ONCE 179
|
||||
#define TK_WINDOW_CLOSE 180
|
||||
#define TK_IGNORE 181
|
||||
#define TK_EXPIRED 182
|
||||
#define TK_KILL 183
|
||||
#define TK_CONNECTION 184
|
||||
#define TK_TRANSACTION 185
|
||||
#define TK_BALANCE 186
|
||||
#define TK_VGROUP 187
|
||||
#define TK_MERGE 188
|
||||
#define TK_REDISTRIBUTE 189
|
||||
#define TK_SPLIT 190
|
||||
#define TK_DELETE 191
|
||||
#define TK_INSERT 192
|
||||
#define TK_NULL 193
|
||||
#define TK_NK_QUESTION 194
|
||||
#define TK_NK_ARROW 195
|
||||
#define TK_ROWTS 196
|
||||
#define TK_TBNAME 197
|
||||
#define TK_QSTART 198
|
||||
#define TK_QEND 199
|
||||
#define TK_QDURATION 200
|
||||
#define TK_WSTART 201
|
||||
#define TK_WEND 202
|
||||
#define TK_WDURATION 203
|
||||
#define TK_CAST 204
|
||||
#define TK_NOW 205
|
||||
#define TK_TODAY 206
|
||||
#define TK_TIMEZONE 207
|
||||
#define TK_CLIENT_VERSION 208
|
||||
#define TK_SERVER_VERSION 209
|
||||
#define TK_SERVER_STATUS 210
|
||||
#define TK_CURRENT_USER 211
|
||||
#define TK_COUNT 212
|
||||
#define TK_LAST_ROW 213
|
||||
#define TK_BETWEEN 214
|
||||
#define TK_IS 215
|
||||
#define TK_NK_LT 216
|
||||
#define TK_NK_GT 217
|
||||
#define TK_NK_LE 218
|
||||
#define TK_NK_GE 219
|
||||
#define TK_NK_NE 220
|
||||
#define TK_MATCH 221
|
||||
#define TK_NMATCH 222
|
||||
#define TK_CONTAINS 223
|
||||
#define TK_IN 224
|
||||
#define TK_JOIN 225
|
||||
#define TK_INNER 226
|
||||
#define TK_SELECT 227
|
||||
#define TK_DISTINCT 228
|
||||
#define TK_WHERE 229
|
||||
#define TK_PARTITION 230
|
||||
#define TK_BY 231
|
||||
#define TK_SESSION 232
|
||||
#define TK_STATE_WINDOW 233
|
||||
#define TK_SLIDING 234
|
||||
#define TK_FILL 235
|
||||
#define TK_VALUE 236
|
||||
#define TK_NONE 237
|
||||
#define TK_PREV 238
|
||||
#define TK_LINEAR 239
|
||||
#define TK_NEXT 240
|
||||
#define TK_HAVING 241
|
||||
#define TK_RANGE 242
|
||||
#define TK_EVERY 243
|
||||
#define TK_ORDER 244
|
||||
#define TK_SLIMIT 245
|
||||
#define TK_SOFFSET 246
|
||||
#define TK_LIMIT 247
|
||||
#define TK_OFFSET 248
|
||||
#define TK_ASC 249
|
||||
#define TK_NULLS 250
|
||||
#define TK_ID 251
|
||||
#define TK_NK_BITNOT 252
|
||||
#define TK_VALUES 253
|
||||
#define TK_IMPORT 254
|
||||
#define TK_NK_SEMI 255
|
||||
#define TK_FILE 256
|
||||
|
||||
#define TK_NK_SPACE 300
|
||||
#define TK_NK_COMMENT 301
|
||||
|
|
|
@ -328,6 +328,7 @@ typedef struct STableScanPhysiNode {
|
|||
int8_t triggerType;
|
||||
int64_t watermark;
|
||||
int8_t igExpired;
|
||||
bool assignBlockUid;
|
||||
} STableScanPhysiNode;
|
||||
|
||||
typedef STableScanPhysiNode STableSeqScanPhysiNode;
|
||||
|
|
|
@ -375,6 +375,7 @@ typedef struct SQuery {
|
|||
int8_t precision;
|
||||
SCmdMsgInfo* pCmdMsg;
|
||||
int32_t msgType;
|
||||
SArray* pTargetTableList;
|
||||
SArray* pTableList;
|
||||
SArray* pDbList;
|
||||
bool showRewrite;
|
||||
|
|
|
@ -171,8 +171,8 @@ typedef struct {
|
|||
} STaskDispatcherFixedEp;
|
||||
|
||||
typedef struct {
|
||||
// int8_t hashMethod;
|
||||
char stbFullName[TSDB_TABLE_FNAME_LEN];
|
||||
int32_t waitingRspCnt;
|
||||
SUseDbRsp dbInfo;
|
||||
} STaskDispatcherShuffle;
|
||||
|
||||
|
|
|
@ -513,7 +513,6 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_PAR_ONLY_ONE_JSON_TAG TAOS_DEF_ERROR_CODE(0, 0x2633)
|
||||
#define TSDB_CODE_PAR_INCORRECT_NUM_OF_COL TAOS_DEF_ERROR_CODE(0, 0x2634)
|
||||
#define TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL TAOS_DEF_ERROR_CODE(0, 0x2635)
|
||||
#define TSDB_CODE_PAR_INVALID_DAYS_VALUE TAOS_DEF_ERROR_CODE(0, 0x2636)
|
||||
#define TSDB_CODE_PAR_OFFSET_LESS_ZERO TAOS_DEF_ERROR_CODE(0, 0x2637)
|
||||
#define TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY TAOS_DEF_ERROR_CODE(0, 0x2638)
|
||||
#define TSDB_CODE_PAR_INVALID_TOPIC_QUERY TAOS_DEF_ERROR_CODE(0, 0x2639)
|
||||
|
|
|
@ -222,6 +222,7 @@ typedef struct SRequestObj {
|
|||
int32_t code;
|
||||
SArray* dbList;
|
||||
SArray* tableList;
|
||||
SArray* targetTableList;
|
||||
SQueryExecMetric metric;
|
||||
SRequestSendRecvBody body;
|
||||
bool syncQuery; // todo refactor: async query object
|
||||
|
|
|
@ -235,6 +235,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
|
|||
if (TSDB_CODE_SUCCESS == code || NEED_CLIENT_HANDLE_ERROR(code)) {
|
||||
TSWAP(pRequest->dbList, (*pQuery)->pDbList);
|
||||
TSWAP(pRequest->tableList, (*pQuery)->pTableList);
|
||||
TSWAP(pRequest->targetTableList, (*pQuery)->pTargetTableList);
|
||||
}
|
||||
|
||||
return code;
|
||||
|
@ -851,7 +852,7 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
|
|||
|
||||
tscDebug("schedulerExecCb request type %s", TMSG_INFO(pRequest->type));
|
||||
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
|
||||
removeMeta(pTscObj, pRequest->tableList);
|
||||
removeMeta(pTscObj, pRequest->targetTableList);
|
||||
}
|
||||
|
||||
// return to client
|
||||
|
@ -1094,7 +1095,7 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida
|
|||
} while (retryNum++ < REQUEST_TOTAL_EXEC_TIMES);
|
||||
|
||||
if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
|
||||
removeMeta(pRequest->pTscObj, pRequest->tableList);
|
||||
removeMeta(pRequest->pTscObj, pRequest->targetTableList);
|
||||
}
|
||||
|
||||
return pRequest;
|
||||
|
|
|
@ -687,6 +687,7 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
|
|||
|
||||
TSWAP(pRequest->dbList, (pQuery)->pDbList);
|
||||
TSWAP(pRequest->tableList, (pQuery)->pTableList);
|
||||
TSWAP(pRequest->targetTableList, (pQuery)->pTargetTableList);
|
||||
|
||||
destorySqlParseWrapper(pWrapper);
|
||||
|
||||
|
|
|
@ -693,6 +693,7 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
|
|||
|
||||
TSWAP(pStmt->exec.pRequest->dbList, pStmt->sql.pQuery->pDbList);
|
||||
TSWAP(pStmt->exec.pRequest->tableList, pStmt->sql.pQuery->pTableList);
|
||||
TSWAP(pStmt->exec.pRequest->targetTableList, pStmt->sql.pQuery->pTargetTableList);
|
||||
|
||||
// if (STMT_TYPE_QUERY == pStmt->sql.queryRes) {
|
||||
// STMT_ERR_RET(stmtRestoreQueryFields(pStmt));
|
||||
|
|
|
@ -89,8 +89,8 @@ static const SSysDbTableSchema userDBSchema[] = {
|
|||
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "retention", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
|
||||
{.name = "cache_model", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "cache_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||
{.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||
{.name = "wal_level", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
|
||||
{.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||
{.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||
|
|
|
@ -114,18 +114,26 @@ int32_t mndAddSinkToTask(SMnode* pMnode, SStreamObj* pStream, SStreamTask* pTask
|
|||
|
||||
int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStreamTask* pTask) {
|
||||
pTask->sinkType = TASK_SINK__NONE;
|
||||
|
||||
bool isShuffle = false;
|
||||
|
||||
if (pStream->fixedSinkVgId == 0) {
|
||||
pTask->dispatchType = TASK_DISPATCH__SHUFFLE;
|
||||
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
|
||||
SDbObj* pDb = mndAcquireDb(pMnode, pStream->targetDb);
|
||||
ASSERT(pDb);
|
||||
|
||||
if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) {
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
if (pDb->cfg.numOfVgroups > 1) {
|
||||
isShuffle = true;
|
||||
pTask->dispatchType = TASK_DISPATCH__SHUFFLE;
|
||||
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
|
||||
if (mndExtractDbInfo(pMnode, pDb, &pTask->shuffleDispatcher.dbInfo, NULL) < 0) {
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
sdbRelease(pMnode->pSdb, pDb);
|
||||
|
||||
sdbRelease(pMnode->pSdb, pDb);
|
||||
}
|
||||
|
||||
if (isShuffle) {
|
||||
memcpy(pTask->shuffleDispatcher.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
|
||||
SArray* pVgs = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
|
||||
int32_t sz = taosArrayGetSize(pVgs);
|
||||
|
|
|
@ -468,7 +468,6 @@ typedef struct SStreamScanInfo {
|
|||
SSDataBlock* pUpdateDataRes;
|
||||
// status for tmq
|
||||
// SSchemaWrapper schema;
|
||||
STqOffset offset;
|
||||
SNodeList* pGroupTags;
|
||||
SNode* pTagCond;
|
||||
SNode* pTagIndexCond;
|
||||
|
|
|
@ -193,7 +193,7 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) {
|
|||
pBlock->info.calWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SSlotDescNode* pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i);
|
||||
SSlotDescNode* pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i);
|
||||
SColumnInfoData idata =
|
||||
createColumnInfoData(pDescNode->dataType.type, pDescNode->dataType.bytes, pDescNode->slotId);
|
||||
idata.info.scale = pDescNode->dataType.scale;
|
||||
|
@ -267,8 +267,9 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle,
|
|||
code = metaGetTableEntryByUid(&mr, info->uid);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
metaReaderClear(&mr);
|
||||
*pQualified = false;
|
||||
|
||||
return terrno;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SNode* pTagCondTmp = nodesCloneNode(pTagCond);
|
||||
|
@ -387,7 +388,7 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) {
|
|||
}
|
||||
|
||||
int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId) {
|
||||
SMetaReader mr = {0};
|
||||
SMetaReader mr = {0};
|
||||
metaReaderInit(&mr, pMeta, 0);
|
||||
metaGetTableEntryByUid(&mr, uid);
|
||||
|
||||
|
@ -395,7 +396,7 @@ int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode,
|
|||
|
||||
nodesRewriteExprsPostOrder(groupNew, doTranslateTagExpr, &mr);
|
||||
char* isNull = (char*)keyBuf;
|
||||
char* pStart = (char*)keyBuf + sizeof(int8_t)*LIST_LENGTH(pGroupNode);
|
||||
char* pStart = (char*)keyBuf + sizeof(int8_t) * LIST_LENGTH(pGroupNode);
|
||||
|
||||
SNode* pNode;
|
||||
int32_t index = 0;
|
||||
|
@ -441,7 +442,7 @@ int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode,
|
|||
}
|
||||
}
|
||||
|
||||
int32_t len = (int32_t)(pStart - (char*)keyBuf);
|
||||
int32_t len = (int32_t)(pStart - (char*)keyBuf);
|
||||
*pGroupId = calcGroupId(keyBuf, len);
|
||||
|
||||
nodesDestroyList(groupNew);
|
||||
|
|
|
@ -14,12 +14,12 @@
|
|||
*/
|
||||
|
||||
#include "executor.h"
|
||||
#include "tref.h"
|
||||
#include "executorimpl.h"
|
||||
#include "planner.h"
|
||||
#include "tdatablock.h"
|
||||
#include "vnode.h"
|
||||
#include "tref.h"
|
||||
#include "tudf.h"
|
||||
#include "vnode.h"
|
||||
|
||||
static TdThreadOnce initPoolOnce = PTHREAD_ONCE_INIT;
|
||||
int32_t exchangeObjRefPool = -1;
|
||||
|
@ -95,16 +95,6 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
|
|||
}
|
||||
}
|
||||
|
||||
#if 0
|
||||
int32_t qStreamScanSnapshot(qTaskInfo_t tinfo) {
|
||||
if (tinfo == NULL) {
|
||||
return TSDB_CODE_QRY_APP_ERROR;
|
||||
}
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
return doSetStreamBlock(pTaskInfo->pRoot, NULL, 0, STREAM_INPUT__TABLE_SCAN, 0, NULL);
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid) {
|
||||
return qSetMultiStreamInput(tinfo, input, 1, type, assignUid);
|
||||
}
|
||||
|
@ -258,8 +248,8 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
|
|||
}
|
||||
|
||||
// todo refactor STableList
|
||||
size_t bufLen = (pScanInfo->pGroupTags != NULL)? getTableTagsBufLen(pScanInfo->pGroupTags):0;
|
||||
char* keyBuf = NULL;
|
||||
size_t bufLen = (pScanInfo->pGroupTags != NULL) ? getTableTagsBufLen(pScanInfo->pGroupTags) : 0;
|
||||
char* keyBuf = NULL;
|
||||
if (bufLen > 0) {
|
||||
keyBuf = taosMemoryMalloc(bufLen);
|
||||
if (keyBuf == NULL) {
|
||||
|
@ -267,13 +257,13 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
|
|||
}
|
||||
}
|
||||
|
||||
for(int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
|
||||
for (int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
|
||||
uint64_t* uid = taosArrayGet(qa, i);
|
||||
STableKeyInfo keyInfo = {.uid = *uid, .groupId = 0};
|
||||
|
||||
if (bufLen > 0) {
|
||||
code = getGroupIdFromTagsVal(pScanInfo->readHandle.meta, keyInfo.uid, pScanInfo->pGroupTags, keyBuf,
|
||||
&keyInfo.groupId);
|
||||
&keyInfo.groupId);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -352,7 +342,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
|
|||
}
|
||||
}
|
||||
|
||||
_error:
|
||||
_error:
|
||||
// if failed to add ref for all tables in this query, abort current query
|
||||
return code;
|
||||
}
|
||||
|
@ -576,11 +566,6 @@ const SSchemaWrapper* qExtractSchemaFromStreamScanner(void* scanner) {
|
|||
return pInfo->tqReader->pSchemaWrapper;
|
||||
}
|
||||
|
||||
const STqOffset* qExtractStatusFromStreamScanner(void* scanner) {
|
||||
SStreamScanInfo* pInfo = scanner;
|
||||
return &pInfo->offset;
|
||||
}
|
||||
|
||||
void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
|
||||
|
@ -603,12 +588,17 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
|
|||
while (1) {
|
||||
uint8_t type = pOperator->operatorType;
|
||||
pOperator->status = OP_OPENED;
|
||||
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
|
||||
SStreamScanInfo* pInfo = pOperator->info;
|
||||
if (pOffset->type == TMQ_OFFSET__LOG) {
|
||||
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
|
||||
tsdbReaderClose(pTSInfo->dataReader);
|
||||
pTSInfo->dataReader = NULL;
|
||||
// TODO add more check
|
||||
if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
|
||||
ASSERT(pOperator->numOfDownstream == 1);
|
||||
pOperator = pOperator->pDownstream[0];
|
||||
}
|
||||
|
||||
SStreamScanInfo* pInfo = pOperator->info;
|
||||
if (pOffset->type == TMQ_OFFSET__LOG) {
|
||||
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
|
||||
tsdbReaderClose(pTSInfo->dataReader);
|
||||
pTSInfo->dataReader = NULL;
|
||||
#if 0
|
||||
if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
|
||||
pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
|
||||
|
@ -617,102 +607,74 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
|
|||
ASSERT(0);
|
||||
}
|
||||
#endif
|
||||
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
|
||||
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
|
||||
return -1;
|
||||
}
|
||||
ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
|
||||
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
|
||||
/*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
|
||||
int64_t uid = pOffset->uid;
|
||||
int64_t ts = pOffset->ts;
|
||||
|
||||
if (uid == 0) {
|
||||
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
|
||||
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
|
||||
uid = pTableInfo->uid;
|
||||
ts = INT64_MIN;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
|
||||
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
|
||||
/*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
|
||||
int64_t uid = pOffset->uid;
|
||||
int64_t ts = pOffset->ts;
|
||||
}
|
||||
|
||||
if (uid == 0) {
|
||||
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
|
||||
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
|
||||
uid = pTableInfo->uid;
|
||||
ts = INT64_MIN;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
|
||||
/*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
|
||||
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
|
||||
int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
|
||||
/*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
|
||||
/*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
|
||||
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
|
||||
int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
|
||||
|
||||
#ifndef NDEBUG
|
||||
|
||||
qDebug("switch to next table %ld (cursor %d), %ld rows returned", uid, pTableScanInfo->currentTable,
|
||||
pInfo->pTableScanOp->resultInfo.totalRows);
|
||||
pInfo->pTableScanOp->resultInfo.totalRows = 0;
|
||||
qDebug("switch to next table %ld (cursor %d), %ld rows returned", uid, pTableScanInfo->currentTable,
|
||||
pInfo->pTableScanOp->resultInfo.totalRows);
|
||||
pInfo->pTableScanOp->resultInfo.totalRows = 0;
|
||||
#endif
|
||||
|
||||
bool found = false;
|
||||
for (int32_t i = 0; i < tableSz; i++) {
|
||||
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
|
||||
if (pTableInfo->uid == uid) {
|
||||
found = true;
|
||||
pTableScanInfo->currentTable = i;
|
||||
break;
|
||||
}
|
||||
bool found = false;
|
||||
for (int32_t i = 0; i < tableSz; i++) {
|
||||
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
|
||||
if (pTableInfo->uid == uid) {
|
||||
found = true;
|
||||
pTableScanInfo->currentTable = i;
|
||||
break;
|
||||
}
|
||||
|
||||
// TODO after dropping table, table may be not found
|
||||
ASSERT(found);
|
||||
|
||||
if (pTableScanInfo->dataReader == NULL) {
|
||||
if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
|
||||
pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
|
||||
pTableScanInfo->dataReader == NULL) {
|
||||
ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
tsdbSetTableId(pTableScanInfo->dataReader, uid);
|
||||
int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
|
||||
pTableScanInfo->cond.twindows.skey = ts + 1;
|
||||
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
|
||||
pTableScanInfo->cond.twindows.skey = oldSkey;
|
||||
pTableScanInfo->scanTimes = 0;
|
||||
|
||||
qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts,
|
||||
pTableScanInfo->currentTable, tableSz);
|
||||
/*}*/
|
||||
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
return 0;
|
||||
|
||||
// TODO after dropping table, table may be not found
|
||||
ASSERT(found);
|
||||
|
||||
if (pTableScanInfo->dataReader == NULL) {
|
||||
if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
|
||||
pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
|
||||
pTableScanInfo->dataReader == NULL) {
|
||||
ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
tsdbSetTableId(pTableScanInfo->dataReader, uid);
|
||||
int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
|
||||
pTableScanInfo->cond.twindows.skey = ts + 1;
|
||||
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
|
||||
pTableScanInfo->cond.twindows.skey = oldSkey;
|
||||
pTableScanInfo->scanTimes = 0;
|
||||
|
||||
qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts,
|
||||
pTableScanInfo->currentTable, tableSz);
|
||||
/*}*/
|
||||
|
||||
} else {
|
||||
ASSERT(pOperator->numOfDownstream == 1);
|
||||
pOperator = pOperator->pDownstream[0];
|
||||
ASSERT(0);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#if 0
|
||||
int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
|
||||
if (uid == 0) {
|
||||
if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
|
||||
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
|
||||
uid = pTableInfo->uid;
|
||||
ts = INT64_MIN;
|
||||
}
|
||||
}
|
||||
|
||||
return doPrepareScan(pTaskInfo->pRoot, uid, ts);
|
||||
}
|
||||
|
||||
int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
|
||||
return doGetScanStatus(pTaskInfo->pRoot, uid, ts);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -1396,13 +1396,11 @@ SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNo
|
|||
|
||||
static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SStreamScanInfo* pStreamScan = (SStreamScanInfo*)param;
|
||||
#if 1
|
||||
if (pStreamScan->pTableScanOp && pStreamScan->pTableScanOp->info) {
|
||||
STableScanInfo* pTableScanInfo = pStreamScan->pTableScanOp->info;
|
||||
destroyTableScanOperatorInfo(pTableScanInfo, numOfOutput);
|
||||
taosMemoryFreeClear(pStreamScan->pTableScanOp);
|
||||
}
|
||||
#endif
|
||||
if (pStreamScan->tqReader) {
|
||||
tqCloseReader(pStreamScan->tqReader);
|
||||
}
|
||||
|
@ -1528,6 +1526,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
|
|||
pInfo->pDeleteDataRes = createSpecialDataBlock(STREAM_DELETE_DATA);
|
||||
pInfo->updateWin = (STimeWindow){.skey = INT64_MAX, .ekey = INT64_MAX};
|
||||
pInfo->pUpdateDataRes = createSpecialDataBlock(STREAM_CLEAR);
|
||||
pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
|
||||
|
||||
pOperator->name = "StreamScanOperator";
|
||||
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
|
||||
|
@ -2855,7 +2854,8 @@ int32_t stopGroupTableMergeScan(SOperatorInfo* pOperator) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock* pResBlock, int32_t capacity, SOperatorInfo* pOperator) {
|
||||
SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock* pResBlock, int32_t capacity,
|
||||
SOperatorInfo* pOperator) {
|
||||
STableMergeScanInfo* pInfo = pOperator->info;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
|
@ -2874,7 +2874,6 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock*
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
qDebug("%s get sorted row blocks, rows:%d", GET_TASKID(pTaskInfo), pResBlock->info.rows);
|
||||
return (pResBlock->info.rows > 0) ? pResBlock : NULL;
|
||||
}
|
||||
|
@ -2905,7 +2904,8 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
|
|||
}
|
||||
SSDataBlock* pBlock = NULL;
|
||||
while (pInfo->tableStartIndex < tableListSize) {
|
||||
pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity, pOperator);
|
||||
pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pInfo->pResBlock, pOperator->resultInfo.capacity,
|
||||
pOperator);
|
||||
if (pBlock != NULL) {
|
||||
pBlock->info.groupId = pInfo->groupId;
|
||||
pOperator->resultInfo.totalRows += pBlock->info.rows;
|
||||
|
|
|
@@ -773,9 +773,9 @@ int32_t binarySearch(void* keyList, int num, TSKEY key, int order, __get_value_f
}

int32_t comparePullWinKey(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
SArray* res = (SArray*)data;
SPullWindowInfo* pos = taosArrayGet(res, index);
SPullWindowInfo* pData = (SPullWindowInfo*) pKey;
SPullWindowInfo* pData = (SPullWindowInfo*)pKey;
if (pData->window.skey == pos->window.skey) {
if (pData->groupId > pos->groupId) {
return 1;

@@ -810,7 +810,7 @@ static int32_t savePullWindow(SPullWindowInfo* pPullInfo, SArray* pPullWins) {
int32_t compareResKey(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
SResKeyPos* pos = taosArrayGetP(res, index);
SWinRes* pData = (SWinRes*) pKey;
SWinRes* pData = (SWinRes*)pKey;
if (pData->ts == *(int64_t*)pos->key) {
if (pData->groupId > pos->groupId) {
return 1;

@@ -880,7 +880,7 @@ int64_t getWinReskey(void* data, int32_t index) {
int32_t compareWinRes(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
SWinRes* pos = taosArrayGetP(res, index);
SResKeyPos* pData = (SResKeyPos*) pKey;
SResKeyPos* pData = (SResKeyPos*)pKey;
if (*(int64_t*)pData->key == pos->ts) {
if (pData->groupId > pos->groupId) {
return 1;
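
The three comparators touched above follow one ordering contract: compare on the timestamp key first, then fall back to groupId, so that each of them can drive the generic binarySearch named in the first hunk header. Below is a minimal, self-contained sketch of that pattern; the WinKey struct and both helper names are simplified stand-ins rather than the real SPullWindowInfo/SWinRes/SArray definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for SPullWindowInfo / SWinRes */
typedef struct {
  int64_t  skey;     /* window start (or result row) timestamp */
  uint64_t groupId;  /* table group id, used as the tie breaker */
} WinKey;

/* same two-level ordering as comparePullWinKey / compareResKey */
static int cmpWinKey(const WinKey* key, const WinKey* pos) {
  if (key->skey != pos->skey) return key->skey > pos->skey ? 1 : -1;
  if (key->groupId != pos->groupId) return key->groupId > pos->groupId ? 1 : -1;
  return 0;
}

/* plain binary search over an ascending array, mirroring how the
   executor probes its pull-window / result-key arrays */
static int32_t searchWinKey(const WinKey* list, int32_t num, const WinKey* key) {
  int32_t lo = 0, hi = num - 1;
  while (lo <= hi) {
    int32_t mid = lo + (hi - lo) / 2;
    int     c = cmpWinKey(key, &list[mid]);
    if (c == 0) return mid;
    if (c > 0) lo = mid + 1; else hi = mid - 1;
  }
  return -(lo + 1);  /* not found: insertion point encoded as negative */
}

int main(void) {
  WinKey wins[] = {{10, 1}, {10, 2}, {20, 1}};
  WinKey probe = {10, 2};
  printf("found at index %d\n", (int)searchWinKey(wins, 3, &probe));
  return 0;
}
```

Probing {10, 2} against the sorted array returns index 1; a negative return value encodes where a missing key would be inserted.
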
@ -1417,15 +1417,15 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
|
|||
SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
|
||||
pGpDatas = (uint64_t*)pGpCol->pData;
|
||||
}
|
||||
int32_t step = 0;
|
||||
int32_t startPos = 0;
|
||||
int32_t step = 0;
|
||||
int32_t startPos = 0;
|
||||
for (int32_t i = 0; i < pBlock->info.rows; i++) {
|
||||
SResultRowInfo dumyInfo;
|
||||
dumyInfo.cur.pageId = -1;
|
||||
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTsCols[i], pInterval, TSDB_ORDER_ASC);
|
||||
while (win.ekey <= endTsCols[i]) {
|
||||
uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId;
|
||||
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
|
||||
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
|
||||
if (pUpWins && res) {
|
||||
SWinRes winRes = {.ts = win.skey, .groupId = winGpId};
|
||||
taosArrayPush(pUpWins, &winRes);
|
||||
|
@ -1596,7 +1596,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
|
|||
}
|
||||
|
||||
if (pBlock->info.type == STREAM_NORMAL) {
|
||||
//set input version
|
||||
// set input version
|
||||
pTaskInfo->version = pBlock->info.version;
|
||||
}
|
||||
|
||||
|
@ -1644,7 +1644,7 @@ static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput) {
|
|||
}
|
||||
|
||||
static void freeItem(void* param) {
|
||||
SGroupKeys *pKey = (SGroupKeys*) param;
|
||||
SGroupKeys* pKey = (SGroupKeys*)param;
|
||||
taosMemoryFree(pKey->pData);
|
||||
}
|
||||
|
||||
|
@ -2347,10 +2347,10 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
|
|||
initResultSizeInfo(&pOperator->resultInfo, 4096);
|
||||
|
||||
pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfExprs, (SNodeListNode*)pInterpPhyNode->pFillValues);
|
||||
pInfo->pRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
|
||||
pInfo->win = pInterpPhyNode->timeRange;
|
||||
pInfo->pRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
|
||||
pInfo->win = pInterpPhyNode->timeRange;
|
||||
pInfo->interval.interval = pInterpPhyNode->interval;
|
||||
pInfo->current = pInfo->win.skey;
|
||||
pInfo->current = pInfo->win.skey;
|
||||
|
||||
pOperator->name = "TimeSliceOperator";
|
||||
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC;
|
||||
|
@ -2970,8 +2970,8 @@ SSDataBlock* createSpecialDataBlock(EStreamType type) {
|
|||
pBlock->info.groupId = 0;
|
||||
pBlock->info.rows = 0;
|
||||
pBlock->info.type = type;
|
||||
pBlock->info.rowSize = sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) +
|
||||
sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
|
||||
pBlock->info.rowSize =
|
||||
sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
|
||||
|
||||
pBlock->pDataBlock = taosArrayInit(6, sizeof(SColumnInfoData));
|
||||
SColumnInfoData infoData = {0};
|
||||
|
@ -4325,10 +4325,10 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
|
|||
} else {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
|
||||
blockDataEnsureCapacity(pAggSup->pScanBlock, pSDataBlock->info.rows);
|
||||
SColumnInfoData* pKeyColInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->stateCol.slotId);
|
||||
SColumnInfoData* pKeyColInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->stateCol.slotId);
|
||||
for (int32_t i = 0; i < pSDataBlock->info.rows; i += winRows) {
|
||||
if (pInfo->ignoreExpiredData && isOverdue(tsCols[i], &pInfo->twAggSup)) {
|
||||
i++;
|
||||
|
@ -4343,7 +4343,7 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
|
|||
&allEqual, pInfo->pSeDeleted);
|
||||
if (!allEqual) {
|
||||
appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey,
|
||||
&pSDataBlock->info.groupId);
|
||||
&pSDataBlock->info.groupId);
|
||||
taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
|
||||
deleteWindow(pAggSup->pCurWins, winIndex);
|
||||
continue;
|
||||
|
|
|
@@ -176,7 +176,6 @@ db_options(A) ::= db_options(B) CACHESIZE NK_INTEGER(C).
db_options(A) ::= db_options(B) COMP NK_INTEGER(C).                             { A = setDatabaseOption(pCxt, B, DB_OPTION_COMP, &C); }
db_options(A) ::= db_options(B) DURATION NK_INTEGER(C).                         { A = setDatabaseOption(pCxt, B, DB_OPTION_DAYS, &C); }
db_options(A) ::= db_options(B) DURATION NK_VARIABLE(C).                        { A = setDatabaseOption(pCxt, B, DB_OPTION_DAYS, &C); }
db_options(A) ::= db_options(B) FSYNC NK_INTEGER(C).                            { A = setDatabaseOption(pCxt, B, DB_OPTION_FSYNC, &C); }
db_options(A) ::= db_options(B) MAXROWS NK_INTEGER(C).                          { A = setDatabaseOption(pCxt, B, DB_OPTION_MAXROWS, &C); }
db_options(A) ::= db_options(B) MINROWS NK_INTEGER(C).                          { A = setDatabaseOption(pCxt, B, DB_OPTION_MINROWS, &C); }
db_options(A) ::= db_options(B) KEEP integer_list(C).                           { A = setDatabaseOption(pCxt, B, DB_OPTION_KEEP, C); }
@@ -186,11 +185,12 @@ db_options(A) ::= db_options(B) PAGESIZE NK_INTEGER(C).
db_options(A) ::= db_options(B) PRECISION NK_STRING(C).                         { A = setDatabaseOption(pCxt, B, DB_OPTION_PRECISION, &C); }
db_options(A) ::= db_options(B) REPLICA NK_INTEGER(C).                          { A = setDatabaseOption(pCxt, B, DB_OPTION_REPLICA, &C); }
db_options(A) ::= db_options(B) STRICT NK_STRING(C).                            { A = setDatabaseOption(pCxt, B, DB_OPTION_STRICT, &C); }
db_options(A) ::= db_options(B) WAL NK_INTEGER(C).                              { A = setDatabaseOption(pCxt, B, DB_OPTION_WAL, &C); }
db_options(A) ::= db_options(B) VGROUPS NK_INTEGER(C).                          { A = setDatabaseOption(pCxt, B, DB_OPTION_VGROUPS, &C); }
db_options(A) ::= db_options(B) SINGLE_STABLE NK_INTEGER(C).                    { A = setDatabaseOption(pCxt, B, DB_OPTION_SINGLE_STABLE, &C); }
db_options(A) ::= db_options(B) RETENTIONS retention_list(C).                   { A = setDatabaseOption(pCxt, B, DB_OPTION_RETENTIONS, C); }
db_options(A) ::= db_options(B) SCHEMALESS NK_INTEGER(C).                       { A = setDatabaseOption(pCxt, B, DB_OPTION_SCHEMALESS, &C); }
db_options(A) ::= db_options(B) WAL_LEVEL NK_INTEGER(C).                        { A = setDatabaseOption(pCxt, B, DB_OPTION_WAL, &C); }
db_options(A) ::= db_options(B) WAL_FSYNC_PERIOD NK_INTEGER(C).                 { A = setDatabaseOption(pCxt, B, DB_OPTION_FSYNC, &C); }
db_options(A) ::= db_options(B) WAL_RETENTION_PERIOD NK_INTEGER(C).             { A = setDatabaseOption(pCxt, B, DB_OPTION_WAL_RETENTION_PERIOD, &C); }
db_options(A) ::= db_options(B) WAL_RETENTION_PERIOD NK_MINUS(D) NK_INTEGER(C). {
SToken t = D;
@@ -214,13 +214,13 @@ alter_db_options(A) ::= alter_db_options(B) alter_db_option(C).
//alter_db_option(A) ::= BUFFER NK_INTEGER(B).                                  { A.type = DB_OPTION_BUFFER; A.val = B; }
alter_db_option(A) ::= CACHEMODEL NK_STRING(B).                                 { A.type = DB_OPTION_CACHEMODEL; A.val = B; }
alter_db_option(A) ::= CACHESIZE NK_INTEGER(B).                                 { A.type = DB_OPTION_CACHESIZE; A.val = B; }
alter_db_option(A) ::= FSYNC NK_INTEGER(B).                                     { A.type = DB_OPTION_FSYNC; A.val = B; }
alter_db_option(A) ::= WAL_FSYNC_PERIOD NK_INTEGER(B).                          { A.type = DB_OPTION_FSYNC; A.val = B; }
alter_db_option(A) ::= KEEP integer_list(B).                                    { A.type = DB_OPTION_KEEP; A.pList = B; }
alter_db_option(A) ::= KEEP variable_list(B).                                   { A.type = DB_OPTION_KEEP; A.pList = B; }
//alter_db_option(A) ::= PAGES NK_INTEGER(B).                                   { A.type = DB_OPTION_PAGES; A.val = B; }
//alter_db_option(A) ::= REPLICA NK_INTEGER(B).                                 { A.type = DB_OPTION_REPLICA; A.val = B; }
//alter_db_option(A) ::= STRICT NK_STRING(B).                                   { A.type = DB_OPTION_STRICT; A.val = B; }
alter_db_option(A) ::= WAL NK_INTEGER(B).                                       { A.type = DB_OPTION_WAL; A.val = B; }
alter_db_option(A) ::= WAL_LEVEL NK_INTEGER(B).                                 { A.type = DB_OPTION_WAL; A.val = B; }

%type integer_list                                                              { SNodeList* }
%destructor integer_list                                                        { nodesDestroyList($$); }

@@ -476,7 +476,7 @@ explain_options(A) ::= explain_options(B) VERBOSE NK_BOOL(C).
explain_options(A) ::= explain_options(B) RATIO NK_FLOAT(C).                    { A = setExplainRatio(pCxt, B, &C); }

/************************************************ compact *************************************************************/
cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP.                             { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
//cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP.                           { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }

/************************************************ create/drop function ************************************************/
cmd ::= CREATE agg_func_opt(A) FUNCTION not_exists_opt(F) function_name(B)

@@ -525,7 +525,7 @@ dnode_list(A) ::= DNODE NK_INTEGER(B).
dnode_list(A) ::= dnode_list(B) DNODE NK_INTEGER(C).                            { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &C)); }

/************************************************ syncdb **************************************************************/
cmd ::= SYNCDB db_name(A) REPLICA.                                              { pCxt->pRootNode = createSyncdbStmt(pCxt, &A); }
//cmd ::= SYNCDB db_name(A) REPLICA.                                            { pCxt->pRootNode = createSyncdbStmt(pCxt, &A); }

/************************************************ syncdb **************************************************************/
cmd ::= DELETE FROM full_table_name(A) where_clause_opt(B).                     { pCxt->pRootNode = createDeleteStmt(pCxt, A, B); }
@ -29,291 +29,224 @@ typedef struct SKeyword {
|
|||
// clang-format off
|
||||
// keywords in sql string
|
||||
static SKeyword keywordTable[] = {
|
||||
{"ACCOUNT", TK_ACCOUNT},
|
||||
{"ACCOUNTS", TK_ACCOUNTS},
|
||||
{"ADD", TK_ADD},
|
||||
{"AGGREGATE", TK_AGGREGATE},
|
||||
{"ALL", TK_ALL},
|
||||
{"ALTER", TK_ALTER},
|
||||
{"ANALYZE", TK_ANALYZE},
|
||||
{"AND", TK_AND},
|
||||
{"APPS", TK_APPS},
|
||||
{"AS", TK_AS},
|
||||
{"ASC", TK_ASC},
|
||||
{"AT_ONCE", TK_AT_ONCE},
|
||||
{"BALANCE", TK_BALANCE},
|
||||
{"BETWEEN", TK_BETWEEN},
|
||||
{"BINARY", TK_BINARY},
|
||||
{"BIGINT", TK_BIGINT},
|
||||
{"BNODE", TK_BNODE},
|
||||
{"BNODES", TK_BNODES},
|
||||
{"BOOL", TK_BOOL},
|
||||
{"BUFFER", TK_BUFFER},
|
||||
{"BUFSIZE", TK_BUFSIZE},
|
||||
{"BY", TK_BY},
|
||||
{"CACHE", TK_CACHE},
|
||||
{"CACHEMODEL", TK_CACHEMODEL},
|
||||
{"CACHESIZE", TK_CACHESIZE},
|
||||
{"CAST", TK_CAST},
|
||||
{"CLIENT_VERSION", TK_CLIENT_VERSION},
|
||||
{"CLUSTER", TK_CLUSTER},
|
||||
{"COLUMN", TK_COLUMN},
|
||||
{"COMMENT", TK_COMMENT},
|
||||
{"COMP", TK_COMP},
|
||||
{"COMPACT", TK_COMPACT},
|
||||
{"CONNS", TK_CONNS},
|
||||
{"CONNECTION", TK_CONNECTION},
|
||||
{"CONNECTIONS", TK_CONNECTIONS},
|
||||
{"CONSUMER", TK_CONSUMER},
|
||||
{"CONSUMERS", TK_CONSUMERS},
|
||||
{"CONTAINS", TK_CONTAINS},
|
||||
{"COUNT", TK_COUNT},
|
||||
{"CREATE", TK_CREATE},
|
||||
{"CURRENT_USER", TK_CURRENT_USER},
|
||||
{"DATABASE", TK_DATABASE},
|
||||
{"DATABASES", TK_DATABASES},
|
||||
{"DBS", TK_DBS},
|
||||
{"DELETE", TK_DELETE},
|
||||
{"DESC", TK_DESC},
|
||||
{"DESCRIBE", TK_DESCRIBE},
|
||||
{"DISTINCT", TK_DISTINCT},
|
||||
{"DISTRIBUTED", TK_DISTRIBUTED},
|
||||
{"DNODE", TK_DNODE},
|
||||
{"DNODES", TK_DNODES},
|
||||
{"DOUBLE", TK_DOUBLE},
|
||||
{"DROP", TK_DROP},
|
||||
{"DURATION", TK_DURATION},
|
||||
{"ENABLE", TK_ENABLE},
|
||||
{"EXISTS", TK_EXISTS},
|
||||
{"EXPIRED", TK_EXPIRED},
|
||||
{"EXPLAIN", TK_EXPLAIN},
|
||||
{"EVERY", TK_EVERY},
|
||||
{"FILE", TK_FILE},
|
||||
{"FILL", TK_FILL},
|
||||
{"FIRST", TK_FIRST},
|
||||
{"FLOAT", TK_FLOAT},
|
||||
{"FLUSH", TK_FLUSH},
|
||||
{"FROM", TK_FROM},
|
||||
{"FSYNC", TK_FSYNC},
|
||||
{"FUNCTION", TK_FUNCTION},
|
||||
{"FUNCTIONS", TK_FUNCTIONS},
|
||||
{"GRANT", TK_GRANT},
|
||||
{"GRANTS", TK_GRANTS},
|
||||
{"GROUP", TK_GROUP},
|
||||
{"HAVING", TK_HAVING},
|
||||
{"IF", TK_IF},
|
||||
{"IGNORE", TK_IGNORE},
|
||||
{"IMPORT", TK_IMPORT},
|
||||
{"IN", TK_IN},
|
||||
{"INDEX", TK_INDEX},
|
||||
{"INDEXES", TK_INDEXES},
|
||||
{"INNER", TK_INNER},
|
||||
{"INT", TK_INT},
|
||||
{"INSERT", TK_INSERT},
|
||||
{"INTEGER", TK_INTEGER},
|
||||
{"INTERVAL", TK_INTERVAL},
|
||||
{"INTO", TK_INTO},
|
||||
{"IS", TK_IS},
|
||||
{"JOIN", TK_JOIN},
|
||||
{"JSON", TK_JSON},
|
||||
{"KEEP", TK_KEEP},
|
||||
{"KILL", TK_KILL},
|
||||
{"LAST", TK_LAST},
|
||||
{"LAST_ROW", TK_LAST_ROW},
|
||||
{"LICENCE", TK_LICENCE},
|
||||
{"LIKE", TK_LIKE},
|
||||
{"LIMIT", TK_LIMIT},
|
||||
{"LINEAR", TK_LINEAR},
|
||||
{"LOCAL", TK_LOCAL},
|
||||
{"MATCH", TK_MATCH},
|
||||
{"MAXROWS", TK_MAXROWS},
|
||||
{"MAX_DELAY", TK_MAX_DELAY},
|
||||
{"MERGE", TK_MERGE},
|
||||
{"META", TK_META},
|
||||
{"MINROWS", TK_MINROWS},
|
||||
{"MINUS", TK_MINUS},
|
||||
{"MNODE", TK_MNODE},
|
||||
{"MNODES", TK_MNODES},
|
||||
{"MODIFY", TK_MODIFY},
|
||||
{"MODULES", TK_MODULES},
|
||||
{"NCHAR", TK_NCHAR},
|
||||
{"NEXT", TK_NEXT},
|
||||
{"NMATCH", TK_NMATCH},
|
||||
{"NONE", TK_NONE},
|
||||
{"NOT", TK_NOT},
|
||||
{"NOW", TK_NOW},
|
||||
{"NULL", TK_NULL},
|
||||
{"NULLS", TK_NULLS},
|
||||
{"OFFSET", TK_OFFSET},
|
||||
{"ON", TK_ON},
|
||||
{"OR", TK_OR},
|
||||
{"ORDER", TK_ORDER},
|
||||
{"OUTPUTTYPE", TK_OUTPUTTYPE},
|
||||
{"PARTITION", TK_PARTITION},
|
||||
{"PASS", TK_PASS},
|
||||
{"PAGES", TK_PAGES},
|
||||
{"PAGESIZE", TK_PAGESIZE},
|
||||
{"PORT", TK_PORT},
|
||||
{"PPS", TK_PPS},
|
||||
{"PRECISION", TK_PRECISION},
|
||||
// {"PRIVILEGE", TK_PRIVILEGE},
|
||||
{"PREV", TK_PREV},
|
||||
{"QNODE", TK_QNODE},
|
||||
{"QNODES", TK_QNODES},
|
||||
{"QTIME", TK_QTIME},
|
||||
{"QUERIES", TK_QUERIES},
|
||||
{"QUERY", TK_QUERY},
|
||||
{"RANGE", TK_RANGE},
|
||||
{"RATIO", TK_RATIO},
|
||||
{"READ", TK_READ},
|
||||
{"REDISTRIBUTE", TK_REDISTRIBUTE},
|
||||
{"RENAME", TK_RENAME},
|
||||
{"REPLICA", TK_REPLICA},
|
||||
{"RESET", TK_RESET},
|
||||
{"RETENTIONS", TK_RETENTIONS},
|
||||
{"REVOKE", TK_REVOKE},
|
||||
{"ROLLUP", TK_ROLLUP},
|
||||
{"SCHEMALESS", TK_SCHEMALESS},
|
||||
{"SCORES", TK_SCORES},
|
||||
{"SELECT", TK_SELECT},
|
||||
{"SERVER_STATUS", TK_SERVER_STATUS},
|
||||
{"SERVER_VERSION", TK_SERVER_VERSION},
|
||||
{"SESSION", TK_SESSION},
|
||||
{"SET", TK_SET},
|
||||
{"SHOW", TK_SHOW},
|
||||
{"SINGLE_STABLE", TK_SINGLE_STABLE},
|
||||
{"SLIDING", TK_SLIDING},
|
||||
{"SLIMIT", TK_SLIMIT},
|
||||
{"SMA", TK_SMA},
|
||||
{"SMALLINT", TK_SMALLINT},
|
||||
{"SNODE", TK_SNODE},
|
||||
{"SNODES", TK_SNODES},
|
||||
{"SOFFSET", TK_SOFFSET},
|
||||
{"SPLIT", TK_SPLIT},
|
||||
{"STABLE", TK_STABLE},
|
||||
{"STABLES", TK_STABLES},
|
||||
{"STATE", TK_STATE},
|
||||
{"STATE_WINDOW", TK_STATE_WINDOW},
|
||||
{"STORAGE", TK_STORAGE},
|
||||
{"STREAM", TK_STREAM},
|
||||
{"STREAMS", TK_STREAMS},
|
||||
{"STRICT", TK_STRICT},
|
||||
{"SUBSCRIPTIONS", TK_SUBSCRIPTIONS},
|
||||
{"SYNCDB", TK_SYNCDB},
|
||||
{"SYSINFO", TK_SYSINFO},
|
||||
{"TABLE", TK_TABLE},
|
||||
{"TABLES", TK_TABLES},
|
||||
{"TAG", TK_TAG},
|
||||
{"TAGS", TK_TAGS},
|
||||
{"TBNAME", TK_TBNAME},
|
||||
{"TIMESTAMP", TK_TIMESTAMP},
|
||||
{"TIMEZONE", TK_TIMEZONE},
|
||||
{"TINYINT", TK_TINYINT},
|
||||
{"TO", TK_TO},
|
||||
{"TODAY", TK_TODAY},
|
||||
{"TOPIC", TK_TOPIC},
|
||||
{"TOPICS", TK_TOPICS},
|
||||
{"TRANSACTION", TK_TRANSACTION},
|
||||
{"TRANSACTIONS", TK_TRANSACTIONS},
|
||||
{"TRIGGER", TK_TRIGGER},
|
||||
{"TRIM", TK_TRIM},
|
||||
{"TSERIES", TK_TSERIES},
|
||||
{"TTL", TK_TTL},
|
||||
{"UNION", TK_UNION},
|
||||
{"UNSIGNED", TK_UNSIGNED},
|
||||
{"USE", TK_USE},
|
||||
{"USER", TK_USER},
|
||||
{"USERS", TK_USERS},
|
||||
{"USING", TK_USING},
|
||||
{"VALUE", TK_VALUE},
|
||||
{"VALUES", TK_VALUES},
|
||||
{"VARCHAR", TK_VARCHAR},
|
||||
{"VARIABLES", TK_VARIABLES},
|
||||
{"VERBOSE", TK_VERBOSE},
|
||||
{"VGROUP", TK_VGROUP},
|
||||
{"VGROUPS", TK_VGROUPS},
|
||||
{"VNODES", TK_VNODES},
|
||||
{"WAL", TK_WAL},
|
||||
{"ACCOUNT", TK_ACCOUNT},
|
||||
{"ACCOUNTS", TK_ACCOUNTS},
|
||||
{"ADD", TK_ADD},
|
||||
{"AGGREGATE", TK_AGGREGATE},
|
||||
{"ALL", TK_ALL},
|
||||
{"ALTER", TK_ALTER},
|
||||
{"ANALYZE", TK_ANALYZE},
|
||||
{"AND", TK_AND},
|
||||
{"APPS", TK_APPS},
|
||||
{"AS", TK_AS},
|
||||
{"ASC", TK_ASC},
|
||||
{"AT_ONCE", TK_AT_ONCE},
|
||||
{"BALANCE", TK_BALANCE},
|
||||
{"BETWEEN", TK_BETWEEN},
|
||||
{"BINARY", TK_BINARY},
|
||||
{"BIGINT", TK_BIGINT},
|
||||
{"BNODE", TK_BNODE},
|
||||
{"BNODES", TK_BNODES},
|
||||
{"BOOL", TK_BOOL},
|
||||
{"BUFFER", TK_BUFFER},
|
||||
{"BUFSIZE", TK_BUFSIZE},
|
||||
{"BY", TK_BY},
|
||||
{"CACHE", TK_CACHE},
|
||||
{"CACHEMODEL", TK_CACHEMODEL},
|
||||
{"CACHESIZE", TK_CACHESIZE},
|
||||
{"CAST", TK_CAST},
|
||||
{"CLIENT_VERSION", TK_CLIENT_VERSION},
|
||||
{"CLUSTER", TK_CLUSTER},
|
||||
{"COLUMN", TK_COLUMN},
|
||||
{"COMMENT", TK_COMMENT},
|
||||
{"COMP", TK_COMP},
|
||||
{"CONNS", TK_CONNS},
|
||||
{"CONNECTION", TK_CONNECTION},
|
||||
{"CONNECTIONS", TK_CONNECTIONS},
|
||||
{"CONSUMER", TK_CONSUMER},
|
||||
{"CONSUMERS", TK_CONSUMERS},
|
||||
{"CONTAINS", TK_CONTAINS},
|
||||
{"COUNT", TK_COUNT},
|
||||
{"CREATE", TK_CREATE},
|
||||
{"CURRENT_USER", TK_CURRENT_USER},
|
||||
{"DATABASE", TK_DATABASE},
|
||||
{"DATABASES", TK_DATABASES},
|
||||
{"DBS", TK_DBS},
|
||||
{"DELETE", TK_DELETE},
|
||||
{"DESC", TK_DESC},
|
||||
{"DESCRIBE", TK_DESCRIBE},
|
||||
{"DISTINCT", TK_DISTINCT},
|
||||
{"DISTRIBUTED", TK_DISTRIBUTED},
|
||||
{"DNODE", TK_DNODE},
|
||||
{"DNODES", TK_DNODES},
|
||||
{"DOUBLE", TK_DOUBLE},
|
||||
{"DROP", TK_DROP},
|
||||
{"DURATION", TK_DURATION},
|
||||
{"ENABLE", TK_ENABLE},
|
||||
{"EXISTS", TK_EXISTS},
|
||||
{"EXPIRED", TK_EXPIRED},
|
||||
{"EXPLAIN", TK_EXPLAIN},
|
||||
{"EVERY", TK_EVERY},
|
||||
{"FILE", TK_FILE},
|
||||
{"FILL", TK_FILL},
|
||||
{"FIRST", TK_FIRST},
|
||||
{"FLOAT", TK_FLOAT},
|
||||
{"FLUSH", TK_FLUSH},
|
||||
{"FROM", TK_FROM},
|
||||
{"FUNCTION", TK_FUNCTION},
|
||||
{"FUNCTIONS", TK_FUNCTIONS},
|
||||
{"GRANT", TK_GRANT},
|
||||
{"GRANTS", TK_GRANTS},
|
||||
{"GROUP", TK_GROUP},
|
||||
{"HAVING", TK_HAVING},
|
||||
{"IF", TK_IF},
|
||||
{"IGNORE", TK_IGNORE},
|
||||
{"IMPORT", TK_IMPORT},
|
||||
{"IN", TK_IN},
|
||||
{"INDEX", TK_INDEX},
|
||||
{"INDEXES", TK_INDEXES},
|
||||
{"INNER", TK_INNER},
|
||||
{"INT", TK_INT},
|
||||
{"INSERT", TK_INSERT},
|
||||
{"INTEGER", TK_INTEGER},
|
||||
{"INTERVAL", TK_INTERVAL},
|
||||
{"INTO", TK_INTO},
|
||||
{"IS", TK_IS},
|
||||
{"JOIN", TK_JOIN},
|
||||
{"JSON", TK_JSON},
|
||||
{"KEEP", TK_KEEP},
|
||||
{"KILL", TK_KILL},
|
||||
{"LAST", TK_LAST},
|
||||
{"LAST_ROW", TK_LAST_ROW},
|
||||
{"LICENCE", TK_LICENCE},
|
||||
{"LIKE", TK_LIKE},
|
||||
{"LIMIT", TK_LIMIT},
|
||||
{"LINEAR", TK_LINEAR},
|
||||
{"LOCAL", TK_LOCAL},
|
||||
{"MATCH", TK_MATCH},
|
||||
{"MAXROWS", TK_MAXROWS},
|
||||
{"MAX_DELAY", TK_MAX_DELAY},
|
||||
{"MERGE", TK_MERGE},
|
||||
{"META", TK_META},
|
||||
{"MINROWS", TK_MINROWS},
|
||||
{"MINUS", TK_MINUS},
|
||||
{"MNODE", TK_MNODE},
|
||||
{"MNODES", TK_MNODES},
|
||||
{"MODIFY", TK_MODIFY},
|
||||
{"MODULES", TK_MODULES},
|
||||
{"NCHAR", TK_NCHAR},
|
||||
{"NEXT", TK_NEXT},
|
||||
{"NMATCH", TK_NMATCH},
|
||||
{"NONE", TK_NONE},
|
||||
{"NOT", TK_NOT},
|
||||
{"NOW", TK_NOW},
|
||||
{"NULL", TK_NULL},
|
||||
{"NULLS", TK_NULLS},
|
||||
{"OFFSET", TK_OFFSET},
|
||||
{"ON", TK_ON},
|
||||
{"OR", TK_OR},
|
||||
{"ORDER", TK_ORDER},
|
||||
{"OUTPUTTYPE", TK_OUTPUTTYPE},
|
||||
{"PARTITION", TK_PARTITION},
|
||||
{"PASS", TK_PASS},
|
||||
{"PAGES", TK_PAGES},
|
||||
{"PAGESIZE", TK_PAGESIZE},
|
||||
{"PORT", TK_PORT},
|
||||
{"PPS", TK_PPS},
|
||||
{"PRECISION", TK_PRECISION},
|
||||
{"PREV", TK_PREV},
|
||||
{"QNODE", TK_QNODE},
|
||||
{"QNODES", TK_QNODES},
|
||||
{"QTIME", TK_QTIME},
|
||||
{"QUERIES", TK_QUERIES},
|
||||
{"QUERY", TK_QUERY},
|
||||
{"RANGE", TK_RANGE},
|
||||
{"RATIO", TK_RATIO},
|
||||
{"READ", TK_READ},
|
||||
{"REDISTRIBUTE", TK_REDISTRIBUTE},
|
||||
{"RENAME", TK_RENAME},
|
||||
{"REPLICA", TK_REPLICA},
|
||||
{"RESET", TK_RESET},
|
||||
{"RETENTIONS", TK_RETENTIONS},
|
||||
{"REVOKE", TK_REVOKE},
|
||||
{"ROLLUP", TK_ROLLUP},
|
||||
{"SCHEMALESS", TK_SCHEMALESS},
|
||||
{"SCORES", TK_SCORES},
|
||||
{"SELECT", TK_SELECT},
|
||||
{"SERVER_STATUS", TK_SERVER_STATUS},
|
||||
{"SERVER_VERSION", TK_SERVER_VERSION},
|
||||
{"SESSION", TK_SESSION},
|
||||
{"SET", TK_SET},
|
||||
{"SHOW", TK_SHOW},
|
||||
{"SINGLE_STABLE", TK_SINGLE_STABLE},
|
||||
{"SLIDING", TK_SLIDING},
|
||||
{"SLIMIT", TK_SLIMIT},
|
||||
{"SMA", TK_SMA},
|
||||
{"SMALLINT", TK_SMALLINT},
|
||||
{"SNODE", TK_SNODE},
|
||||
{"SNODES", TK_SNODES},
|
||||
{"SOFFSET", TK_SOFFSET},
|
||||
{"SPLIT", TK_SPLIT},
|
||||
{"STABLE", TK_STABLE},
|
||||
{"STABLES", TK_STABLES},
|
||||
{"STATE", TK_STATE},
|
||||
{"STATE_WINDOW", TK_STATE_WINDOW},
|
||||
{"STORAGE", TK_STORAGE},
|
||||
{"STREAM", TK_STREAM},
|
||||
{"STREAMS", TK_STREAMS},
|
||||
{"STRICT", TK_STRICT},
|
||||
{"SUBSCRIPTIONS", TK_SUBSCRIPTIONS},
|
||||
{"SYSINFO", TK_SYSINFO},
|
||||
{"TABLE", TK_TABLE},
|
||||
{"TABLES", TK_TABLES},
|
||||
{"TAG", TK_TAG},
|
||||
{"TAGS", TK_TAGS},
|
||||
{"TBNAME", TK_TBNAME},
|
||||
{"TIMESTAMP", TK_TIMESTAMP},
|
||||
{"TIMEZONE", TK_TIMEZONE},
|
||||
{"TINYINT", TK_TINYINT},
|
||||
{"TO", TK_TO},
|
||||
{"TODAY", TK_TODAY},
|
||||
{"TOPIC", TK_TOPIC},
|
||||
{"TOPICS", TK_TOPICS},
|
||||
{"TRANSACTION", TK_TRANSACTION},
|
||||
{"TRANSACTIONS", TK_TRANSACTIONS},
|
||||
{"TRIGGER", TK_TRIGGER},
|
||||
{"TRIM", TK_TRIM},
|
||||
{"TSERIES", TK_TSERIES},
|
||||
{"TTL", TK_TTL},
|
||||
{"UNION", TK_UNION},
|
||||
{"UNSIGNED", TK_UNSIGNED},
|
||||
{"USE", TK_USE},
|
||||
{"USER", TK_USER},
|
||||
{"USERS", TK_USERS},
|
||||
{"USING", TK_USING},
|
||||
{"VALUE", TK_VALUE},
|
||||
{"VALUES", TK_VALUES},
|
||||
{"VARCHAR", TK_VARCHAR},
|
||||
{"VARIABLES", TK_VARIABLES},
|
||||
{"VERBOSE", TK_VERBOSE},
|
||||
{"VGROUP", TK_VGROUP},
|
||||
{"VGROUPS", TK_VGROUPS},
|
||||
{"WAL_FSYNC_PERIOD", TK_WAL_FSYNC_PERIOD},
|
||||
{"WAL_LEVEL", TK_WAL_LEVEL},
|
||||
{"WAL_RETENTION_PERIOD", TK_WAL_RETENTION_PERIOD},
|
||||
{"WAL_RETENTION_SIZE", TK_WAL_RETENTION_SIZE},
|
||||
{"WAL_ROLL_PERIOD", TK_WAL_ROLL_PERIOD},
|
||||
{"WAL_SEGMENT_SIZE", TK_WAL_SEGMENT_SIZE},
|
||||
{"WATERMARK", TK_WATERMARK},
|
||||
{"WHERE", TK_WHERE},
|
||||
{"WINDOW_CLOSE", TK_WINDOW_CLOSE},
|
||||
{"WITH", TK_WITH},
|
||||
{"WRITE", TK_WRITE},
|
||||
{"_C0", TK_ROWTS},
|
||||
{"_QDURATION", TK_QDURATION},
|
||||
{"_QEND", TK_QEND},
|
||||
{"_QSTART", TK_QSTART},
|
||||
{"_ROWTS", TK_ROWTS},
|
||||
{"_WDURATION", TK_WDURATION},
|
||||
{"_WEND", TK_WEND},
|
||||
{"_WSTART", TK_WSTART},
|
||||
// {"ID", TK_ID},
|
||||
// {"STRING", TK_STRING},
|
||||
// {"EQ", TK_EQ},
|
||||
// {"NE", TK_NE},
|
||||
// {"ISNULL", TK_ISNULL},
|
||||
// {"NOTNULL", TK_NOTNULL},
|
||||
// {"GLOB", TK_GLOB},
|
||||
// {"GT", TK_GT},
|
||||
// {"GE", TK_GE},
|
||||
// {"LT", TK_LT},
|
||||
// {"LE", TK_LE},
|
||||
// {"BITAND", TK_BITAND},
|
||||
// {"BITOR", TK_BITOR},
|
||||
// {"LSHIFT", TK_LSHIFT},
|
||||
// {"RSHIFT", TK_RSHIFT},
|
||||
// {"PLUS", TK_PLUS},
|
||||
// {"DIVIDE", TK_DIVIDE},
|
||||
// {"TIMES", TK_TIMES},
|
||||
// {"STAR", TK_STAR},
|
||||
// {"SLASH", TK_SLASH},
|
||||
// {"REM ", TK_REM},
|
||||
// {"||", TK_CONCAT},
|
||||
// {"UMINUS", TK_UMINUS},
|
||||
// {"UPLUS", TK_UPLUS},
|
||||
// {"BITNOT", TK_BITNOT},
|
||||
// {"DOT", TK_DOT},
|
||||
// {"CTIME", TK_CTIME},
|
||||
// {"LP", TK_LP},
|
||||
// {"RP", TK_RP},
|
||||
// {"COMMA", TK_COMMA},
|
||||
// {"VARIABLE", TK_VARIABLE},
|
||||
// {"UPDATE", TK_UPDATE},
|
||||
// {"CHANGE", TK_CHANGE},
|
||||
// {"COLON", TK_COLON},
|
||||
// {"ABORT", TK_ABORT},
|
||||
// {"AFTER", TK_AFTER},
|
||||
// {"ATTACH", TK_ATTACH},
|
||||
// {"BEFORE", TK_BEFORE},
|
||||
// {"BEGIN", TK_BEGIN},
|
||||
// {"CASCADE", TK_CASCADE},
|
||||
// {"CONFLICT", TK_CONFLICT},
|
||||
// {"COPY", TK_COPY},
|
||||
// {"DEFERRED", TK_DEFERRED},
|
||||
// {"DELIMITERS", TK_DELIMITERS},
|
||||
// {"DETACH", TK_DETACH},
|
||||
// {"EACH", TK_EACH},
|
||||
// {"END", TK_END},
|
||||
// {"FAIL", TK_FAIL},
|
||||
// {"FOR", TK_FOR},
|
||||
// {"IMMEDIATE", TK_IMMEDIATE},
|
||||
// {"INITIALLY", TK_INITIALLY},
|
||||
// {"INSTEAD", TK_INSTEAD},
|
||||
// {"KEY", TK_KEY},
|
||||
// {"OF", TK_OF},
|
||||
// {"RAISE", TK_RAISE},
|
||||
// {"REPLACE", TK_REPLACE},
|
||||
// {"RESTRICT", TK_RESTRICT},
|
||||
// {"ROW", TK_ROW},
|
||||
// {"STATEMENT", TK_STATEMENT},
|
||||
// {"VIEW", TK_VIEW},
|
||||
// {"SEMI", TK_SEMI},
|
||||
// {"PARTITIONS", TK_PARTITIONS},
|
||||
// {"MODE", TK_MODE},
|
||||
{"WATERMARK", TK_WATERMARK},
|
||||
{"WHERE", TK_WHERE},
|
||||
{"WINDOW_CLOSE", TK_WINDOW_CLOSE},
|
||||
{"WITH", TK_WITH},
|
||||
{"WRITE", TK_WRITE},
|
||||
{"_C0", TK_ROWTS},
|
||||
{"_QDURATION", TK_QDURATION},
|
||||
{"_QEND", TK_QEND},
|
||||
{"_QSTART", TK_QSTART},
|
||||
{"_ROWTS", TK_ROWTS},
|
||||
{"_WDURATION", TK_WDURATION},
|
||||
{"_WEND", TK_WEND},
|
||||
{"_WSTART", TK_WSTART},
|
||||
};
|
||||
// clang-format on
|
||||
|
||||
|
|
|
@@ -39,6 +39,7 @@ typedef struct STranslateContext {
SCmdMsgInfo* pCmdMsg;
SHashObj* pDbs;
SHashObj* pTables;
SHashObj* pTargetTables;
SExplainOptions* pExplainOpt;
SParseMetaCache* pMetaCache;
bool createStream;

@@ -89,10 +90,10 @@ static int32_t collectUseDatabase(const SName* pName, SHashObj* pDbs) {
return collectUseDatabaseImpl(dbFName, pDbs);
}

static int32_t collectUseTable(const SName* pName, SHashObj* pDbs) {
static int32_t collectUseTable(const SName* pName, SHashObj* pTable) {
char fullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pName, fullName);
return taosHashPut(pDbs, fullName, strlen(fullName), pName, sizeof(SName));
return taosHashPut(pTable, fullName, strlen(fullName), pName, sizeof(SName));
}

static int32_t getTableMetaImpl(STranslateContext* pCxt, const SName* pName, STableMeta** pMeta) {

@@ -357,7 +358,8 @@ static int32_t initTranslateContext(SParseContext* pParseCxt, SParseMetaCache* p
pCxt->pMetaCache = pMetaCache;
pCxt->pDbs = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
pCxt->pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pCxt->pNsLevel || NULL == pCxt->pDbs || NULL == pCxt->pTables) {
pCxt->pTargetTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == pCxt->pNsLevel || NULL == pCxt->pDbs || NULL == pCxt->pTables || NULL == pCxt->pTargetTables) {
return TSDB_CODE_OUT_OF_MEMORY;
}
return TSDB_CODE_SUCCESS;

@@ -3188,7 +3190,8 @@ static int32_t checkOptionsDependency(STranslateContext* pCxt, const char* pDbNa
daysToKeep0 = (-1 == daysToKeep0 ? dbCfg.daysToKeep0 : daysToKeep0);
}
if (daysPerFile > daysToKeep0) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DAYS_VALUE);
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION,
"Invalid duration value, should be keep2 >= keep1 >= keep0 >= duration");
}
return TSDB_CODE_SUCCESS;
}
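
The reworded message above states the rule that checkOptionsDependency enforces between the retention tiers and the data-file duration. A hedged, stand-alone sketch of that rule (plain integers instead of the real SDbCfg/STranslateContext plumbing, and without the -1 "not being altered" fallback shown in the hunk):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* keep0/keep1/keep2 and duration are all expressed in days here */
static bool keepDurationValid(int32_t duration, int32_t keep0, int32_t keep1, int32_t keep2) {
  /* "keep2 >= keep1 >= keep0 >= duration" from the new error text */
  return keep2 >= keep1 && keep1 >= keep0 && keep0 >= duration;
}

int main(void) {
  printf("%d\n", keepDurationValid(10, 3650, 3650, 3650)); /* 1: accepted */
  printf("%d\n", keepDurationValid(100, 50, 3650, 3650));  /* 0: rejected */
  return 0;
}
```
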
|
@ -3933,6 +3936,9 @@ static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm
|
|||
SName tableName;
|
||||
tNameExtractFullName(toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tableName), pReq->name);
|
||||
int32_t code = collectUseTable(&tableName, pCxt->pTables);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = collectUseTable(&tableName, pCxt->pTargetTables);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = buildRollupAst(pCxt, pStmt, pReq);
|
||||
}
|
||||
|
@ -3953,11 +3959,14 @@ static int32_t translateCreateSuperTable(STranslateContext* pCxt, SCreateTableSt
|
|||
}
|
||||
|
||||
static int32_t doTranslateDropSuperTable(STranslateContext* pCxt, const SName* pTableName, bool ignoreNotExists) {
|
||||
SMDropStbReq dropReq = {0};
|
||||
tNameExtractFullName(pTableName, dropReq.name);
|
||||
dropReq.igNotExists = ignoreNotExists;
|
||||
|
||||
return buildCmdMsg(pCxt, TDMT_MND_DROP_STB, (FSerializeFunc)tSerializeSMDropStbReq, &dropReq);
|
||||
int32_t code = collectUseTable(pTableName, pCxt->pTargetTables);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
SMDropStbReq dropReq = {0};
|
||||
tNameExtractFullName(pTableName, dropReq.name);
|
||||
dropReq.igNotExists = ignoreNotExists;
|
||||
code = buildCmdMsg(pCxt, TDMT_MND_DROP_STB, (FSerializeFunc)tSerializeSMDropStbReq, &dropReq);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t translateDropTable(STranslateContext* pCxt, SDropTableStmt* pStmt) {
|
||||
|
@ -5559,8 +5568,13 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
|
|||
|
||||
int32_t code = checkCreateTable(pCxt, pStmt, false);
|
||||
SVgroupInfo info = {0};
|
||||
SName name;
|
||||
toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &name);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &info);
|
||||
code = getTableHashVgroupImpl(pCxt, &name, &info);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = collectUseTable(&name, pCxt->pTargetTables);
|
||||
}
|
||||
SArray* pBufArray = NULL;
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
|
@ -5829,6 +5843,11 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = getTableMeta(pCxt, pStmt->useDbName, pStmt->useTableName, &pSuperTableMeta);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
SName name;
|
||||
toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &name);
|
||||
code = collectUseTable(&name, pCxt->pTargetTables);
|
||||
}
|
||||
|
||||
STag* pTag = NULL;
|
||||
SArray* tagName = taosArrayInit(8, TSDB_COL_NAME_LEN);
|
||||
|
@ -5927,8 +5946,13 @@ static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* p
|
|||
|
||||
static int32_t buildDropTableVgroupHashmap(STranslateContext* pCxt, SDropTableClause* pClause, bool* pIsSuperTable,
|
||||
SHashObj* pVgroupHashmap) {
|
||||
SName name;
|
||||
toName(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, &name);
|
||||
STableMeta* pTableMeta = NULL;
|
||||
int32_t code = getTableMeta(pCxt, pClause->dbName, pClause->tableName, &pTableMeta);
|
||||
int32_t code = getTableMetaImpl(pCxt, &name, &pTableMeta);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = collectUseTable(&name, pCxt->pTargetTables);
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code && TSDB_SUPER_TABLE == pTableMeta->tableType) {
|
||||
*pIsSuperTable = true;
|
||||
|
@ -6509,6 +6533,20 @@ static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery) {
|
|||
pTable = taosHashIterate(pCxt->pTables, pTable);
|
||||
}
|
||||
}
|
||||
|
||||
if (NULL != pCxt->pTargetTables) {
|
||||
taosArrayDestroy(pQuery->pTargetTableList);
|
||||
pQuery->pTargetTableList = taosArrayInit(taosHashGetSize(pCxt->pTargetTables), sizeof(SName));
|
||||
if (NULL == pQuery->pTargetTableList) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
SName* pTable = taosHashIterate(pCxt->pTargetTables, NULL);
|
||||
while (NULL != pTable) {
|
||||
taosArrayPush(pQuery->pTargetTableList, pTable);
|
||||
pTable = taosHashIterate(pCxt->pTargetTables, pTable);
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@@ -99,8 +99,6 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "Query block has incorrect number of result columns";
case TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL:
return "Incorrect TIMESTAMP value: %s";
case TSDB_CODE_PAR_INVALID_DAYS_VALUE:
return "Invalid days value, should be keep2 >= keep1 >= keep0 >= days";
case TSDB_CODE_PAR_OFFSET_LESS_ZERO:
return "soffset/offset can not be less than 0";
case TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY:
|
File diff suppressed because it is too large
|
@@ -82,12 +82,12 @@ TEST_F(ParserInitialATest, alterDnode) {
 * BUFFER int_value -- todo: range [3, 16384], default 96, unit MB
 * | CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'} -- default 'none'
 * | CACHESIZE int_value -- range [1, 65536], default 1, unit MB
 * | FSYNC int_value -- rang [0, 180000], default 3000, unit ms
 * | WAL_FSYNC_PERIOD int_value -- rang [0, 180000], default 3000, unit ms
 * | KEEP {int_value | duration_value} -- rang [1, 365000], default 3650, unit day
 * | PAGES int_value -- todo: rang [64, +oo), default 256, unit page
 * | REPLICA int_value -- todo: enum 1, 3, default 1, unit replica
 * | STRICT {'off' | 'on'} -- todo: default 'off'
 * | WAL int_value -- enum 1, 2, default 1
 * | WAL_LEVEL int_value -- enum 1, 2, default 1
 * }
 */
TEST_F(ParserInitialATest, alterDatabase) {
@ -157,7 +157,7 @@ TEST_F(ParserInitialATest, alterDatabase) {
|
|||
setAlterDbFsync(200);
|
||||
setAlterDbWal(1);
|
||||
setAlterDbCacheModel(TSDB_CACHE_MODEL_LAST_ROW);
|
||||
run("ALTER DATABASE test CACHEMODEL 'last_row' CACHESIZE 32 FSYNC 200 KEEP 10 WAL 1");
|
||||
run("ALTER DATABASE test CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 WAL_LEVEL 1");
|
||||
clearAlterDbReq();
|
||||
|
||||
initAlterDb("test");
|
||||
|
@ -182,11 +182,11 @@ TEST_F(ParserInitialATest, alterDatabase) {
|
|||
|
||||
initAlterDb("test");
|
||||
setAlterDbFsync(0);
|
||||
run("ALTER DATABASE test FSYNC 0");
|
||||
run("ALTER DATABASE test WAL_FSYNC_PERIOD 0");
|
||||
setAlterDbFsync(1000);
|
||||
run("ALTER DATABASE test FSYNC 1000");
|
||||
run("ALTER DATABASE test WAL_FSYNC_PERIOD 1000");
|
||||
setAlterDbFsync(180000);
|
||||
run("ALTER DATABASE test FSYNC 180000");
|
||||
run("ALTER DATABASE test WAL_FSYNC_PERIOD 180000");
|
||||
clearAlterDbReq();
|
||||
|
||||
initAlterDb("test");
|
||||
|
@ -210,9 +210,9 @@ TEST_F(ParserInitialATest, alterDatabase) {
|
|||
|
||||
initAlterDb("test");
|
||||
setAlterDbWal(1);
|
||||
run("ALTER DATABASE test WAL 1");
|
||||
run("ALTER DATABASE test WAL_LEVEL 1");
|
||||
setAlterDbWal(2);
|
||||
run("ALTER DATABASE test WAL 2");
|
||||
run("ALTER DATABASE test WAL_LEVEL 2");
|
||||
clearAlterDbReq();
|
||||
}
|
||||
|
||||
|
@@ -223,16 +223,16 @@ TEST_F(ParserInitialATest, alterDatabaseSemanticCheck) {
run("ALTER DATABASE test CACHESIZE 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test CACHESIZE 65537", TSDB_CODE_PAR_INVALID_DB_OPTION);
// The syntax limits it to only positive numbers
run("ALTER DATABASE test FSYNC -1", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
run("ALTER DATABASE test FSYNC 180001", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL_FSYNC_PERIOD -1", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
run("ALTER DATABASE test WAL_FSYNC_PERIOD 180001", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 365001", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 1000000000s", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test KEEP 1w", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL 3", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL_LEVEL 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL_LEVEL 3", TSDB_CODE_PAR_INVALID_DB_OPTION);
// Regardless of the specific sentence
run("ALTER DATABASE db WAL 0 # td-14436", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
run("ALTER DATABASE db WAL_LEVEL 0 # td-14436", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
}

/*
@@ -21,12 +21,6 @@ namespace ParserTest {

class ParserInitialCTest : public ParserDdlTest {};

TEST_F(ParserInitialCTest, compact) {
useDb("root", "test");

run("COMPACT VNODES IN (1, 2)", TSDB_CODE_PAR_EXPRIE_STATEMENT, PARSER_STAGE_PARSE);
}

TEST_F(ParserInitialCTest, createAccount) {
useDb("root", "test");

@@ -64,7 +58,7 @@ TEST_F(ParserInitialCTest, createBnode) {
 * | CACHESIZE value
 * | COMP {0 | 1 | 2}
 * | DURATION value
 * | FSYNC value
 * | WAL_FSYNC_PERIOD value
 * | MAXROWS value
 * | MINROWS value
 * | KEEP value

@@ -74,7 +68,7 @@ TEST_F(ParserInitialCTest, createBnode) {
 * | REPLICA value
 * | RETENTIONS ingestion_duration:keep_duration ...
 * | STRICT {'off' | 'on'}
 * | WAL value
 * | WAL_LEVEL value
 * | VGROUPS value
 * | SINGLE_STABLE {0 | 1}
 * | WAL_RETENTION_PERIOD value

@@ -241,7 +235,7 @@ TEST_F(ParserInitialCTest, createDatabase) {
"CACHESIZE 20 "
"COMP 1 "
"DURATION 100 "
"FSYNC 100 "
"WAL_FSYNC_PERIOD 100 "
"MAXROWS 1000 "
"MINROWS 100 "
"KEEP 1440 "

@@ -251,7 +245,7 @@ TEST_F(ParserInitialCTest, createDatabase) {
"REPLICA 3 "
"RETENTIONS 15s:7d,1m:21d,15m:500d "
"STRICT 'on' "
"WAL 2 "
"WAL_LEVEL 2 "
"VGROUPS 100 "
"SINGLE_STABLE 1 "
"SCHEMALESS 1 "
@@ -553,6 +553,7 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
pTableScan->triggerType = pScanLogicNode->triggerType;
pTableScan->watermark = pScanLogicNode->watermark;
pTableScan->igExpired = pScanLogicNode->igExpired;
pTableScan->assignBlockUid = pCxt->pPlanCxt->rSmaQuery ? true : false;

return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode);
}
@@ -38,14 +38,14 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum);
int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb);
int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData);
int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData);
int32_t streamBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* data, SRpcMsg* pMsg, SEpSet** ppEpSet);
int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* data);

int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock);

int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq);

SStreamQueueItem* streamAppendQueueItem(SStreamQueueItem* dst, SStreamQueueItem* elem);
void streamFreeQitem(SStreamQueueItem* data);
void streamFreeQitem(SStreamQueueItem* data);

#ifdef __cplusplus
}
@@ -219,6 +219,12 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp) {

qDebug("task %d receive dispatch rsp", pTask->taskId);

if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
qDebug("task %d is shuffle, left waiting rsp %d", pTask->taskId, leftRsp);
if (leftRsp > 0) return 0;
}

int8_t old = atomic_exchange_8(&pTask->outputStatus, pRsp->inputStatus);
ASSERT(old == TASK_OUTPUT_STATUS__WAIT);
if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
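
The new branch makes a shuffle task wait until every vgroup it dispatched to has responded before the output status is flipped. A small illustrative sketch of that countdown, using C11 atomics in place of TDengine's atomic_sub_fetch_32 macro:

```c
#include <stdatomic.h>
#include <stdio.h>

/* stand-in for the task's shuffle dispatcher state */
typedef struct {
  atomic_int waitingRspCnt;  /* set to the number of vgroups dispatched to */
} ShuffleState;

/* returns 1 only for the response that brings the counter to zero,
   i.e. the point where the real code finally touches outputStatus */
static int onDispatchRsp(ShuffleState* st) {
  int left = atomic_fetch_sub(&st->waitingRspCnt, 1) - 1;
  printf("left waiting rsp %d\n", left);
  return left == 0;
}

int main(void) {
  ShuffleState st;
  atomic_init(&st.waitingRspCnt, 3);
  for (int i = 0; i < 3; i++) {
    if (onDispatchRsp(&st)) printf("all downstream vgroups acked\n");
  }
  return 0;
}
```
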
|
@ -198,6 +198,158 @@ static int32_t streamAddBlockToDispatchMsg(const SSDataBlock* pBlock, SStreamDis
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamDispatchOneReq(SStreamTask* pTask, const SStreamDispatchReq* pReq, int32_t vgId, SEpSet* pEpSet) {
|
||||
void* buf = NULL;
|
||||
int32_t code = -1;
|
||||
SRpcMsg msg = {0};
|
||||
|
||||
// serialize
|
||||
int32_t tlen;
|
||||
tEncodeSize(tEncodeStreamDispatchReq, pReq, tlen, code);
|
||||
if (code < 0) goto FAIL;
|
||||
code = -1;
|
||||
buf = rpcMallocCont(sizeof(SMsgHead) + tlen);
|
||||
if (buf == NULL) {
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
((SMsgHead*)buf)->vgId = htonl(vgId);
|
||||
void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
|
||||
|
||||
SEncoder encoder;
|
||||
tEncoderInit(&encoder, abuf, tlen);
|
||||
if ((code = tEncodeStreamDispatchReq(&encoder, pReq)) < 0) {
|
||||
goto FAIL;
|
||||
}
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
msg.contLen = tlen + sizeof(SMsgHead);
|
||||
msg.pCont = buf;
|
||||
msg.msgType = pTask->dispatchMsgType;
|
||||
|
||||
tmsgSendReq(pEpSet, &msg);
|
||||
|
||||
code = 0;
|
||||
FAIL:
|
||||
if (code < 0 && buf) rpcFreeCont(buf);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) {
|
||||
int32_t code = -1;
|
||||
int32_t blockNum = taosArrayGetSize(pData->blocks);
|
||||
ASSERT(blockNum != 0);
|
||||
|
||||
if (pTask->dispatchType == TASK_DISPATCH__FIXED) {
|
||||
SStreamDispatchReq req = {
|
||||
.streamId = pTask->streamId,
|
||||
.dataSrcVgId = pData->srcVgId,
|
||||
.upstreamTaskId = pTask->taskId,
|
||||
.upstreamChildId = pTask->selfChildId,
|
||||
.upstreamNodeId = pTask->nodeId,
|
||||
.blockNum = blockNum,
|
||||
};
|
||||
|
||||
req.data = taosArrayInit(blockNum, sizeof(void*));
|
||||
req.dataLen = taosArrayInit(blockNum, sizeof(int32_t));
|
||||
if (req.data == NULL || req.dataLen == NULL) {
|
||||
goto FAIL_FIXED_DISPATCH;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < blockNum; i++) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i);
|
||||
if (streamAddBlockToDispatchMsg(pDataBlock, &req) < 0) {
|
||||
goto FAIL_FIXED_DISPATCH;
|
||||
}
|
||||
}
|
||||
int32_t vgId = pTask->fixedEpDispatcher.nodeId;
|
||||
SEpSet* pEpSet = &pTask->fixedEpDispatcher.epSet;
|
||||
int32_t downstreamTaskId = pTask->fixedEpDispatcher.taskId;
|
||||
|
||||
req.taskId = downstreamTaskId;
|
||||
|
||||
qDebug("dispatch from task %d (child id %d) to down stream task %d in vnode %d", pTask->taskId, pTask->selfChildId,
|
||||
downstreamTaskId, vgId);
|
||||
|
||||
if (streamDispatchOneReq(pTask, &req, vgId, pEpSet) < 0) {
|
||||
goto FAIL_FIXED_DISPATCH;
|
||||
}
|
||||
code = 0;
|
||||
FAIL_FIXED_DISPATCH:
|
||||
taosArrayDestroy(req.data);
|
||||
taosArrayDestroy(req.dataLen);
|
||||
return code;
|
||||
|
||||
} else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
|
||||
SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
|
||||
ASSERT(pTask->shuffleDispatcher.waitingRspCnt == 0);
|
||||
int32_t vgSz = taosArrayGetSize(vgInfo);
|
||||
SStreamDispatchReq* pReqs = taosMemoryCalloc(vgSz, sizeof(SStreamDispatchReq));
|
||||
if (pReqs == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < vgSz; i++) {
|
||||
pReqs[i].streamId = pTask->streamId;
|
||||
pReqs[i].dataSrcVgId = pData->srcVgId;
|
||||
pReqs[i].upstreamTaskId = pTask->taskId;
|
||||
pReqs[i].upstreamChildId = pTask->selfChildId;
|
||||
pReqs[i].upstreamNodeId = pTask->nodeId;
|
||||
pReqs[i].blockNum = 0;
|
||||
pReqs[i].data = taosArrayInit(0, sizeof(void*));
|
||||
pReqs[i].dataLen = taosArrayInit(0, sizeof(int32_t));
|
||||
if (pReqs[i].data == NULL || pReqs[i].dataLen == NULL) {
|
||||
goto FAIL_SHUFFLE_DISPATCH;
|
||||
}
|
||||
}
|
||||
for (int32_t i = 0; i < blockNum; i++) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i);
|
||||
char* ctbName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, pDataBlock->info.groupId);
|
||||
|
||||
// TODO: get hash function by hashMethod
|
||||
uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));
|
||||
|
||||
// TODO: optimize search
|
||||
int32_t j;
|
||||
for (j = 0; j < vgSz; j++) {
|
||||
SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j);
|
||||
ASSERT(pVgInfo->vgId > 0);
|
||||
if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) {
|
||||
if (streamAddBlockToDispatchMsg(pDataBlock, &pReqs[j]) < 0) {
|
||||
goto FAIL_SHUFFLE_DISPATCH;
|
||||
}
|
||||
pReqs[j].taskId = pVgInfo->taskId;
|
||||
pReqs[j].blockNum++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
for (int32_t i = 0; i < vgSz; i++) {
|
||||
if (pReqs[i].blockNum > 0) {
|
||||
// send
|
||||
SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
|
||||
if (streamDispatchOneReq(pTask, &pReqs[i], pVgInfo->vgId, &pVgInfo->epSet) < 0) {
|
||||
goto FAIL_SHUFFLE_DISPATCH;
|
||||
}
|
||||
pTask->shuffleDispatcher.waitingRspCnt++;
|
||||
}
|
||||
}
|
||||
code = 0;
|
||||
FAIL_SHUFFLE_DISPATCH:
|
||||
if (pReqs) {
|
||||
for (int32_t i = 0; i < vgSz; i++) {
|
||||
taosArrayDestroy(pReqs[i].data);
|
||||
taosArrayDestroy(pReqs[i].dataLen);
|
||||
}
|
||||
taosMemoryFree(pReqs);
|
||||
}
|
||||
return code;
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t streamBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* data, SRpcMsg* pMsg, SEpSet** ppEpSet) {
|
||||
void* buf = NULL;
|
||||
int32_t code = -1;
|
||||
|
@ -262,29 +414,7 @@ int32_t streamBuildDispatchMsg(SStreamTask* pTask, const SStreamDataBlock* data,
|
|||
qDebug("dispatch from task %d (child id %d) to down stream task %d in vnode %d", pTask->taskId, pTask->selfChildId,
|
||||
downstreamTaskId, vgId);
|
||||
|
||||
// serialize
|
||||
int32_t tlen;
|
||||
tEncodeSize(tEncodeStreamDispatchReq, &req, tlen, code);
|
||||
if (code < 0) goto FAIL;
|
||||
code = -1;
|
||||
buf = rpcMallocCont(sizeof(SMsgHead) + tlen);
|
||||
if (buf == NULL) {
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
((SMsgHead*)buf)->vgId = htonl(vgId);
|
||||
void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
|
||||
|
||||
SEncoder encoder;
|
||||
tEncoderInit(&encoder, abuf, tlen);
|
||||
if ((code = tEncodeStreamDispatchReq(&encoder, &req)) < 0) {
|
||||
goto FAIL;
|
||||
}
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
pMsg->contLen = tlen + sizeof(SMsgHead);
|
||||
pMsg->pCont = buf;
|
||||
pMsg->msgType = pTask->dispatchMsgType;
|
||||
streamDispatchOneReq(pTask, &req, vgId, *ppEpSet);
|
||||
|
||||
code = 0;
|
||||
FAIL:
|
||||
|
@ -314,6 +444,18 @@ int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb) {
|
|||
|
||||
qDebug("stream continue dispatching: task %d", pTask->taskId);
|
||||
|
||||
int32_t code = 0;
|
||||
if (streamDispatchAllBlocks(pTask, pBlock) < 0) {
|
||||
ASSERT(0);
|
||||
code = -1;
|
||||
// TODO set status fail
|
||||
goto FREE;
|
||||
}
|
||||
/*atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);*/
|
||||
FREE:
|
||||
taosArrayDestroyEx(pBlock->blocks, (FDelete)blockDataFreeRes);
|
||||
taosFreeQitem(pBlock);
|
||||
#if 0
|
||||
SRpcMsg dispatchMsg = {0};
|
||||
SEpSet* pEpSet = NULL;
|
||||
if (streamBuildDispatchMsg(pTask, pBlock, &dispatchMsg, &pEpSet) < 0) {
|
||||
|
@ -325,5 +467,6 @@ int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb) {
|
|||
taosFreeQitem(pBlock);
|
||||
|
||||
tmsgSendReq(pEpSet, &dispatchMsg);
|
||||
return 0;
|
||||
#endif
|
||||
return code;
|
||||
}
|
||||
|
|
|
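
For context on the shuffle path added in the streamDispatch.c hunks above: each result block is routed to the vgroup whose hash range contains the hash of the generated child-table name, with a linear scan that the code itself flags as "TODO: optimize search". A simplified, self-contained sketch of that routing follows; the FNV-1a hash is only a placeholder for the MurmurHash3_32 call used in the real code, and VgRange is a cut-down stand-in for SVgroupInfo.

```c
#include <stdint.h>
#include <stdio.h>

/* simplified view of SVgroupInfo: each vgroup owns a hash range */
typedef struct {
  int32_t  vgId;
  uint32_t hashBegin;
  uint32_t hashEnd;
} VgRange;

/* placeholder hash (FNV-1a); the real dispatcher hashes with MurmurHash3_32 */
static uint32_t hashName(const char* name) {
  uint32_t h = 2166136261u;
  for (; *name; name++) { h ^= (uint8_t)*name; h *= 16777619u; }
  return h;
}

/* linear scan over the vgroup ranges, as in streamDispatchAllBlocks */
static int32_t routeBlock(const char* ctbName, const VgRange* vgs, int32_t vgSz) {
  uint32_t hashValue = hashName(ctbName);
  for (int32_t j = 0; j < vgSz; j++) {
    if (hashValue >= vgs[j].hashBegin && hashValue <= vgs[j].hashEnd) return vgs[j].vgId;
  }
  return -1;  /* no owning vgroup found */
}

int main(void) {
  VgRange vgs[] = {{2, 0u, 0x7fffffffu}, {3, 0x80000000u, 0xffffffffu}};
  printf("vgId=%d\n", (int)routeBlock("st_12345", vgs, 2));
  return 0;
}
```
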
@@ -83,7 +83,16 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) {

// set config
memcpy(&pWal->cfg, pCfg, sizeof(SWalCfg));

pWal->fsyncSeq = pCfg->fsyncPeriod / 1000;
if (pWal->cfg.retentionSize > 0) {
pWal->cfg.retentionSize *= 1024;
}

if (pWal->cfg.segSize > 0) {
pWal->cfg.segSize *= 1024;
}

if (pWal->fsyncSeq <= 0) pWal->fsyncSeq = 1;

tstrncpy(pWal->path, path, sizeof(pWal->path));
|
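The walOpen hunk above normalizes the user-facing config: the fsync period given in milliseconds becomes a whole-second flush sequence clamped to at least 1, and retentionSize/segSize are scaled by 1024 (presumably KB to bytes). A small sketch of that normalization, using stand-in field names rather than the real SWalCfg:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the SWalCfg fields touched in the hunk above. */
typedef struct {
  int32_t fsyncPeriod;    /* configured in ms */
  int64_t retentionSize;  /* configured value, scaled by 1024 when > 0 */
  int64_t segSize;        /* configured value, scaled by 1024 when > 0 */
} Cfg;

/* Mirrors the normalization: ms -> per-second fsync sequence with a
 * floor of 1, and sizes multiplied by 1024 (assumed to be KB -> bytes). */
static int32_t normalizeCfg(Cfg *cfg) {
  int32_t fsyncSeq = cfg->fsyncPeriod / 1000;
  if (cfg->retentionSize > 0) cfg->retentionSize *= 1024;
  if (cfg->segSize > 0) cfg->segSize *= 1024;
  if (fsyncSeq <= 0) fsyncSeq = 1;
  return fsyncSeq;
}

int main(void) {
  Cfg cfg = {.fsyncPeriod = 3000, .retentionSize = 2048, .segSize = 1024};
  int32_t fsyncSeq = normalizeCfg(&cfg);
  printf("fsyncSeq=%d retentionSize=%lld segSize=%lld\n", fsyncSeq,
         (long long)cfg.retentionSize, (long long)cfg.segSize);
  return 0;
}
```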
@ -401,6 +401,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy
  pWal->writeHead.head.version = index;
  pWal->writeHead.head.bodyLen = bodyLen;
  pWal->writeHead.head.msgType = msgType;
  pWal->writeHead.head.ingestTs = taosGetTimestampMs();

  // sync info for sync module
  pWal->writeHead.head.syncMeta = syncMeta;

@ -457,14 +458,14 @@ int64_t walAppendLog(SWal *pWal, tmsg_t msgType, SWalSyncInfo syncMeta, const vo
    return -1;
  }

  if (pWal->pIdxFile == NULL || pWal->pIdxFile == NULL || pWal->writeCur < 0) {
  if (pWal->pLogFile == NULL || pWal->pIdxFile == NULL || pWal->writeCur < 0) {
    if (walInitWriteFile(pWal) < 0) {
      taosThreadMutexUnlock(&pWal->mutex);
      return -1;
    }
  }

  ASSERT(pWal->pIdxFile != NULL && pWal->pLogFile != NULL && pWal->writeCur >= 0);
  ASSERT(pWal->pLogFile != NULL && pWal->pIdxFile != NULL && pWal->writeCur >= 0);

  if (walWriteImpl(pWal, index, msgType, syncMeta, body, bodyLen) < 0) {
    taosThreadMutexUnlock(&pWal->mutex);

@ -517,7 +517,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTER_SLIDING_TOO_SMALL, "sliding value can no
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_ONLY_ONE_JSON_TAG, "Only one tag if there is a json tag")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INCORRECT_NUM_OF_COL, "Query block has incorrect number of result columns")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL, "Incorrect TIMESTAMP value")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DAYS_VALUE, "Invalid days value, should be keep2 >= keep1 >= keep0 >= days")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_OFFSET_LESS_ZERO, "soffset/offset can not be less than 0")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY, "slimit/soffset only available for PARTITION BY query")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TOPIC_QUERY, "Invalid topic query")
@ -47,16 +47,16 @@ class TDTestCase:
        tdSql.error('create database db comp "1.4"')
        tdSql.error("create database db blocks '10'")
        tdSql.error('create database db keep "3650"')
        tdSql.error('create database db fsync "3650"')
        tdSql.error('create database db wal_fsync_period "3650"')
        tdSql.execute('create database db precision "us"')
        tdSql.query('show databases')
        tdSql.checkData(0,16,'us')
        tdSql.execute('drop database if exists db')

        #checking float input exception for create
        tdSql.error("create database db fsync 7.3")
        tdSql.error("create database db fsync 0.0")
        tdSql.error("create database db fsync -5.32")
        tdSql.error("create database db wal_fsync_period 7.3")
        tdSql.error("create database db wal_fsync_period 0.0")
        tdSql.error("create database db wal_fsync_period -5.32")
        tdSql.error('create database db comp 7.2')
        tdSql.error("create database db blocks 5.87")
        tdSql.error('create database db keep 15.4')
@ -77,7 +77,7 @@ class TDTestCase:
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "wal_fsync_period": 3000,
            "update": 0
        }

@ -92,7 +92,7 @@ class JoinPerf:
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "wal_fsync_period": 3000,
            "update": 0
        }

@ -86,7 +86,7 @@ class Taosdemo:
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "wal_fsync_period": 3000,
            "update": 0
        }

@ -68,7 +68,7 @@ class TDTestCase:
            "walLevel": 1,
            "cachelast": 0,
            "quorum": 1,
            "fsync": 3000,
            "wal_fsync_period": 3000,
            "update": 0
        }
@ -165,13 +165,13 @@
# TD-17661 ./test.sh -f tsim/parser/where.sim

# ---- query
./test.sh -f tsim/query/interval.sim
./test.sh -f tsim/query/interval-offset.sim
./test.sh -f tsim/query/scalarFunction.sim
./test.sh -f tsim/query/charScalarFunction.sim
./test.sh -f tsim/query/explain.sim
./test.sh -f tsim/query/session.sim
# ./test.sh -f tsim/query/explain.sim
./test.sh -f tsim/query/interval-offset.sim
./test.sh -f tsim/query/interval.sim
./test.sh -f tsim/query/scalarFunction.sim
./test.sh -f tsim/query/scalarNull.sim
./test.sh -f tsim/query/session.sim
./test.sh -f tsim/query/udf.sim

# ---- qnode
@ -41,12 +41,12 @@ print ============= create database
# | BUFFER value [3~16384, default: 96]
# | PAGES value [64~16384, default: 256]
# | CACHEMODEL value ['node', 'last_row', 'last_value', 'both']
# | FSYNC value [0 ~ 180000 ms]
# | WAL_FSYNC_PERIOD value [0 ~ 180000 ms]
# | KEEP value [duration, 365000]
# | REPLICA value [1 | 3]
# | WAL value [1 | 2]
# | WAL_LEVEL value [1 | 2]

sql create database db CACHEMODEL 'both' COMP 0 DURATION 240 FSYNC 1000 MAXROWS 8000 MINROWS 10 KEEP 1000 PRECISION 'ns' REPLICA 3 WAL 2 VGROUPS 6 SINGLE_STABLE 1
sql create database db CACHEMODEL 'both' COMP 0 DURATION 240 WAL_FSYNC_PERIOD 1000 MAXROWS 8000 MINROWS 10 KEEP 1000 PRECISION 'ns' REPLICA 3 WAL_LEVEL 2 VGROUPS 6 SINGLE_STABLE 1
sql show databases
print rows: $rows
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09

@ -99,16 +99,16 @@ endi
if $data14_db != ns then # precision
  return -1
endi
if $data18_db != both then # cache_model
if $data18_db != both then # cachemodel
  return -1
endi
if $data19_db != 1 then # cash_size
  return -1
endi
if $data20_db != 2 then # wal level
if $data20_db != 2 then # wal_level level
  return -1
endi
if $data21_db != 1000 then # wal fsyncperiod
if $data21_db != 1000 then # wal_level fsyncperiod
  return -1
endi
if $data22_db != 0 then #

@ -296,46 +296,46 @@ sql_error alter database db maxrows 2000
sql_error alter database db maxrows 11 # equal minrows
sql_error alter database db maxrows 10 # little than minrows

print ============== step wal
sql alter database db wal 1
print ============== step wal_level
sql alter database db wal_level 1
sql show databases
print wal $data20_db
print wal_level $data20_db
if $data20_db != 1 then
  return -1
endi
sql alter database db wal 2
sql alter database db wal_level 2
sql show databases
print wal $data20_db
print wal_level $data20_db
if $data20_db != 2 then
  return -1
endi

sql_error alter database db wal 0 # TD-14436
sql_error alter database db wal 3
sql_error alter database db wal 100
sql_error alter database db wal -1
sql_error alter database db wal_level 0 # TD-14436
sql_error alter database db wal_level 3
sql_error alter database db wal_level 100
sql_error alter database db wal_level -1

print ============== modify fsync
sql alter database db fsync 2000
print ============== modify wal_fsync_period
sql alter database db wal_fsync_period 2000
sql show databases
print fsync $data21_db
print wal_fsync_period $data21_db
if $data21_db != 2000 then
  return -1
endi
sql alter database db fsync 500
sql alter database db wal_fsync_period 500
sql show databases
print fsync $data21_db
print wal_fsync_period $data21_db
if $data21_db != 500 then
  return -1
endi
sql alter database db fsync 0
sql alter database db wal_fsync_period 0
sql show databases
print fsync $data21_db
print wal_fsync_period $data21_db
if $data21_db != 0 then
  return -1
endi
sql_error alter database db fsync 180001
sql_error alter database db fsync -1
sql_error alter database db wal_fsync_period 180001
sql_error alter database db wal_fsync_period -1

print ============== modify comp
sql_error alter database db comp 1
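The sim and Python cases above pin down the accepted ranges after the rename: WAL_LEVEL must be 1 or 2 and WAL_FSYNC_PERIOD must lie in [0, 180000] ms, with everything else rejected. A tiny sketch of those boundary checks, reflecting only what the tests assert (not the server's actual validation code):

```c
#include <stdbool.h>
#include <stdio.h>

/* Boundaries exercised by the tests above; illustrative only. */
static bool validWalLevel(int level) { return level == 1 || level == 2; }
static bool validWalFsyncPeriod(int ms) { return ms >= 0 && ms <= 180000; }

int main(void) {
  const int levels[] = {-1, 0, 1, 2, 3, 100};
  const int periods[] = {-1, 0, 500, 2000, 180000, 180001};
  for (int i = 0; i < 6; i++) {
    printf("wal_level %d -> %s\n", levels[i], validWalLevel(levels[i]) ? "ok" : "error");
  }
  for (int i = 0; i < 6; i++) {
    printf("wal_fsync_period %d -> %s\n", periods[i], validWalFsyncPeriod(periods[i]) ? "ok" : "error");
  }
  return 0;
}
```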
@ -15,7 +15,7 @@ $tb = $tbPrefix . $i

print =============== step1
# quorum presicion
sql create database $db vgroups 8 replica 1 duration 2 keep 10 minrows 80 maxrows 10000 wal 2 fsync 1000 comp 0 cachemodel 'last_value' precision 'us'
sql create database $db vgroups 8 replica 1 duration 2 keep 10 minrows 80 maxrows 10000 wal_level 2 wal_fsync_period 1000 comp 0 cachemodel 'last_value' precision 'us'
sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
@ -43,13 +43,13 @@ print ============= create database with all options
# | CACHEMODEL value ['node', 'last_row', 'last_value', 'both', default: 'node']
# | COMP [0 | 1 | 2, default: 2]
# | DURATION value [60m ~ min(3650d,keep), default: 10d, unit may be minut/hour/day]
# | FSYNC value [0 ~ 180000 ms, default: 3000]
# | WAL_FSYNC_PERIOD value [0 ~ 180000 ms, default: 3000]
# | MAXROWS value [200~10000, default: 4096]
# | MINROWS value [10~1000, default: 100]
# | KEEP value [max(1d ~ 365000d), default: 1d, unit may be minut/hour/day]
# | PRECISION ['ms' | 'us' | 'ns', default: ms]
# | REPLICA value [1 | 3, default: 1]
# | WAL value [1 | 2, default: 1]
# | WAL_LEVEL value [1 | 2, default: 1]
# | VGROUPS value [default: 2]
# | SINGLE_STABLE [0 | 1, default: ]
#

@ -62,7 +62,7 @@ print ============= create database with all options
#$data7_db : keep
#$data10_db : minrows
#$data11_db : maxrows
#$data12_db : wal
#$data12_db : wal_level
#$data13_db : fsync
#$data14_db : comp
#$data15_db : cachelast

@ -122,10 +122,10 @@ endi
if $data18_db != none then # cachelast
  return -1
endi
if $data20_db != 1 then # wal
if $data20_db != 1 then # wal_level
  return -1
endi
if $data21_db != 3000 then # fsync
if $data21_db != 3000 then # wal_fsync_period
  return -1
endi

@ -254,8 +254,8 @@ sql_error create database db COMP -1
#sql_error create database db KEEP 525600001m
#sql_error create database db KEEP 365001d

print ====> FSYNC value [0 ~ 180000 ms, default: 3000]
sql create database db FSYNC 0
print ====> WAL_FSYNC_PERIOD value [0 ~ 180000 ms, default: 3000]
sql create database db WAL_FSYNC_PERIOD 0
sql show databases
print $data0_db $data1_db $data2_db $data3_db $data4_db $data5_db $data6_db $data7_db $data8_db $data9_db $data10_db $data11_db $data12_db $data13_db $data14_db $data15_db $data16_db $data17_db
if $data21_db != 0 then

@ -263,15 +263,15 @@ if $data21_db != 0 then
endi
sql drop database db

sql create database db FSYNC 180000
sql create database db WAL_FSYNC_PERIOD 180000
sql show databases
print $data0_db $data1_db $data2_db $data3_db $data4_db $data5_db $data6_db $data7_db $data8_db $data9_db $data10_db $data11_db $data12_db $data13_db $data14_db $data15_db $data16_db $data17_db
if $data21_db != 180000 then
  return -1
endi
sql drop database db
sql_error create database db FSYNC 180001
sql_error create database db FSYNC -1
sql_error create database db WAL_FSYNC_PERIOD 180001
sql_error create database db WAL_FSYNC_PERIOD -1

print ====> MAXROWS value [200~10000, default: 4096], MINROWS value [10~1000, default: 100]
sql create database db MAXROWS 10000 MINROWS 1000

@ -386,8 +386,8 @@ sql_error create database db REPLICA 4
#sql_error create database db TTL 0
#sql_error create database db TTL -1

print ====> WAL value [1 | 2, default: 1]
sql create database db WAL 2
print ====> WAL_LEVEL value [1 | 2, default: 1]
sql create database db WAL_LEVEL 2
sql show databases
print $data0_db $data1_db $data2_db $data3_db $data4_db $data5_db $data6_db $data7_db $data8_db $data9_db $data10_db $data11_db $data12_db $data13_db $data14_db $data15_db $data16_db $data17_db
if $data20_db != 2 then

@ -395,16 +395,16 @@ if $data20_db != 2 then
endi
sql drop database db

sql create database db WAL 1
sql create database db WAL_LEVEL 1
sql show databases
print $data0_db $data1_db $data2_db $data3_db $data4_db $data5_db $data6_db $data7_db $data8_db $data9_db $data10_db $data11_db $data12_db $data13_db $data14_db $data15_db $data16_db $data17_db
if $data20_db != 1 then
  return -1
endi
sql drop database db
sql_error create database db WAL 3
sql_error create database db WAL -1
sql_error create database db WAL 0
sql_error create database db WAL_LEVEL 3
sql_error create database db WAL_LEVEL -1
sql_error create database db WAL_LEVEL 0

print ====> VGROUPS value [1~4096, default: 2]
sql create database db VGROUPS 1
@ -107,7 +107,7 @@ $ctime = 36000 # 10 hours
$wal = 1 # valid value is 1, 2
$comp = 1 # max=32, automatically trimmed when exceeding

sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db wal $wal comp $comp
sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db wal_level $wal comp $comp
sql show databases
if $rows != 3 then
  return -1

@ -225,13 +225,13 @@ sql_error create database $db ctime 29
sql_error create database $db ctime 40961

# wal {0, 2}
sql_error create database testwal wal 0
sql_error create database testwal wal_level 0
sql show databases
if $rows != 2 then
  return -1
endi

sql create database testwal wal 1
sql create database testwal wal_level 1
sql show databases
if $rows != 3 then
  return -1

@ -243,7 +243,7 @@ if $data13_testwal != 1 then
endi
sql drop database testwal

sql create database testwal wal 2
sql create database testwal wal_level 2
sql show databases
if $rows != 3 then
  return -1

@ -254,8 +254,8 @@ if $data13_testwal != 2 then
endi
sql drop database testwal

sql_error create database $db wal -1
sql_error create database $db wal 3
sql_error create database $db wal_level -1
sql_error create database $db wal_level 3

# comp {0, 1, 2}
sql_error create database $db comp -1
@ -1,26 +1,6 @@
#### length, char_length, lower, upper, ltrim, rtrim, concat, concat_ws, substr.

system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi
sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

$vgroups = 4

@ -1,25 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi

sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

print =============== create database

@ -1,25 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi

sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

print =============== create database

@ -1,25 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi

sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

print =============== create database

@ -1,25 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi

sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

print =============== create database

@ -1,25 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi

sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

print =============== create database

@ -1,25 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi

sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

print =============== create database
@ -1,26 +1,6 @@
system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi
sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

$dbPrefix = db

@ -1,7 +1,5 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1

print ========= start dnode1 as leader
system sh/exec.sh -n dnode1 -s start
sql connect

@ -1,7 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sleep 500
sql connect

print =============== create database

@ -1,9 +1,6 @@
system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wal -v 1
system sh/exec.sh -n dnode1 -s start
sleep 500
sql connect

$dbPrefix = m_in_db

@ -1,12 +1,6 @@
system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1

print ========= start dnode1 as leader
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect

sql create database abc1 vgroups 2;
@ -1,26 +1,6 @@
#### abs, log, pow, sqrt, sin, cos, tan, asin, acos, atan, ceil, floor, round

system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi
sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

$vgroups = 4

@ -1,12 +1,6 @@
system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1

print ========= start dnode1 as leader
system sh/exec.sh -n dnode1 -s start
sleep 2000
sql connect

print ======== step1

@ -1,26 +1,6 @@
#### session windows

system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi
sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

$vgroups = 4

@ -1,25 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi

sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

print =============== create database

@ -1,25 +1,6 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start

$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
  print ====> dnode not ready!
  return -1
endi

sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
  return -1
endi
if $data04 != ready then
  goto check_dnode_ready
endi

sql connect

print =============== create database
@ -45,7 +45,17 @@ while $i < $tbNum
endw

print =============== step3: tb
sql select count(1) from tb1
sql explain analyze select ts from stb where -2;
sql explain analyze select ts from tb1;
sql explain analyze select ts from stb order by ts;
sql explain analyze select * from information_schema.user_stables;
sql explain analyze select count(*),sum(tbcol) from tb1;
sql explain analyze select count(*),sum(tbcol) from stb;
sql explain analyze select count(*),sum(tbcol) from stb group by tbcol;
sql explain analyze select * from information_schema.user_stables;
sql explain analyze verbose true select * from information_schema.user_stables where db_name='db2';
sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)

_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT

@ -29,7 +29,7 @@ $rowNum = 10
print =============== step2: prepare data
sql create database db vgroups 2
sql use db
sql create table if not exists stb (ts timestamp, tbcol int, tbcol2 float, tbcol3 double) tags (tgcol int unsigned)
sql create table if not exists stb (ts timestamp, tbcol int, tbcol2 float, tbcol3 double, tbcol4 binary(30), tbcol5 binary(30)) tags (tgcol int unsigned)

$i = 0
while $i < $tbNum

@ -39,13 +39,13 @@ while $i < $tbNum
  while $x < $rowNum
    $cc = $x * 60000
    $ms = 1601481600000 + $cc
    sql insert into $tb values ($ms , $x , $x , $x )
    sql insert into $tb values ($ms , $x , $x , $x , "abcd1234=-+*" , "123456 0" )
    $x = $x + 1
  endw

  $cc = $x * 60000
  $ms = 1601481600000 + $cc
  sql insert into $tb values ($ms , NULL , NULL , NULL )
  sql insert into $tb values ($ms , NULL , NULL , NULL , NULL , NULL )
  $i = $i + 1
endw

@ -68,6 +68,13 @@ sql select first(tbcol), last(tbcol) as b from tb1 where ts <= 1601481840000 int
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
sql select last_row(*) from tb1 where tbcol > 5 and tbcol < 20
sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from tb1 interval(10s, 2s) sliding(10s)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from tb1
sql select length("abcd1234"), char_length("abcd1234=-+*") from tb1
sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4), rtrim(tbcol4), concat(tbcol4, tbcol5), concat_ws('_', tbcol4, tbcol5), substr(tbcol4, 1, 4) from tb1
sql select * from tb1 where tbcol not in (1,2,3,null);
sql select * from tb1 where tbcol + 3 <> null;

print =============== step4: stb
sql select avg(tbcol) as c from stb

@ -88,6 +95,15 @@ sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 par
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from stb where ts <= 1601481840000 partition by tgcol interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
sql select last_row(tbcol), stddev(tbcol) from stb where tbcol > 5 and tbcol < 20 group by tgcol
sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from stb interval(10s, 2s) sliding(10s)
sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from stb
sql select length("abcd1234"), char_length("abcd1234=-+*") from stb
sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4), rtrim(tbcol4), concat(tbcol4, tbcol5), concat_ws('_', tbcol4, tbcol5), substr(tbcol4, 1, 4) from stb
sql select * from stb where tbcol not in (1,2,3,null);
sql select * from stb where tbcol + 3 <> null;

print =============== step5: explain

_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
@ -49,35 +49,35 @@ class TDTestCase:
                fsync_index = i

        tdSql.execute("drop database if exists db1")
        tdSql.execute("create database db1 wal 1")
        tdSql.execute("create database db1 wal_level 1")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":
                tdSql.checkData(i, wal_index, 1)

        tdSql.execute("drop database if exists db1")
        tdSql.execute("create database db1 wal 2")
        tdSql.execute("create database db1 wal_level 2")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":
                tdSql.checkData(i, wal_index, 2)

        tdSql.execute("drop database if exists db1")
        tdSql.execute("create database db1 fsync 0")
        tdSql.execute("create database db1 wal_fsync_period 0")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":
                tdSql.checkData(i, fsync_index, 0)

        tdSql.execute("drop database if exists db1")
        tdSql.execute("create database db1 fsync 3000")
        tdSql.execute("create database db1 wal_fsync_period 3000")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":
                tdSql.checkData(i, fsync_index, 3000)

        tdSql.execute("drop database if exists db1")
        tdSql.execute("create database db1 fsync 180000")
        tdSql.execute("create database db1 wal_fsync_period 180000")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":

@ -85,7 +85,7 @@ class TDTestCase:

        tdSql.execute("drop database if exists db1")
        tdSql.execute("create database db1 wal 1 fsync 6000")
        tdSql.execute("create database db1 wal_level 1 wal_fsync_period 6000")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":

@ -93,49 +93,49 @@ class TDTestCase:
                tdSql.checkData(i, wal_index, 1)

        tdSql.execute("drop database if exists db1")
        tdSql.execute("create database db1 wal 2 fsync 3000")
        tdSql.execute("create database db1 wal_level 2 wal_fsync_period 3000")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":
                tdSql.checkData(i, fsync_index, 3000)
                tdSql.checkData(i, wal_index, 2)

        tdSql.execute("alter database db1 wal 1")
        tdSql.execute("alter database db1 wal_level 1")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":
                tdSql.checkData(i, fsync_index, 3000)
                tdSql.checkData(i, wal_index, 1)

        tdSql.execute("alter database db1 wal 2")
        tdSql.execute("alter database db1 wal_level 2")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":
                tdSql.checkData(i, fsync_index, 3000)
                tdSql.checkData(i, wal_index, 2)

        tdSql.execute("alter database db1 fsync 0")
        tdSql.execute("alter database db1 wal_fsync_period 0")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":
                tdSql.checkData(i, fsync_index, 0)
                tdSql.checkData(i, wal_index, 2)

        tdSql.execute("alter database db1 fsync 3000")
        tdSql.execute("alter database db1 wal_fsync_period 3000")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":
                tdSql.checkData(i, fsync_index, 3000)
                tdSql.checkData(i, wal_index, 2)

        tdSql.execute("alter database db1 fsync 18000")
        tdSql.execute("alter database db1 wal_fsync_period 18000")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":
                tdSql.checkData(i, fsync_index, 18000)
                tdSql.checkData(i, wal_index, 2)

        tdSql.execute("alter database db1 wal 1 fsync 3000")
        tdSql.execute("alter database db1 wal_level 1 wal_fsync_period 3000")
        tdSql.query("show databases")
        for i in range(tdSql.queryRows):
            if tdSql.queryResult[i][0] == "db1":

@ -147,29 +147,29 @@ class TDTestCase:
    @property
    def fsync_create_err(self):
        return [
            "create database db1 wal 0",
            "create database db1 wal 3",
            "create database db1 wal null",
            "create database db1 wal true",
            "create database db1 wal 1.1",
            "create database db1 fsync -1",
            "create database db1 fsync 180001",
            "create database db1 fsync 10.111",
            "create database db1 fsync true",
            "create database db1 wal_level 0",
            "create database db1 wal_level 3",
            "create database db1 wal_level null",
            "create database db1 wal_level true",
            "create database db1 wal_level 1.1",
            "create database db1 wal_fsync_period -1",
            "create database db1 wal_fsync_period 180001",
            "create database db1 wal_fsync_period 10.111",
            "create database db1 wal_fsync_period true",
        ]

    @property
    def fsync_alter_err(self):
        return [
            "alter database db1 wal 0",
            "alter database db1 wal 3",
            "alter database db1 wal null",
            "alter database db1 wal true",
            "alter database db1 wal 1.1",
            "alter database db1 fsync -1",
            "alter database db1 fsync 180001",
            "alter database db1 fsync 10.111",
            "alter database db1 fsync true",
            "alter database db1 wal_level 0",
            "alter database db1 wal_level 3",
            "alter database db1 wal_level null",
            "alter database db1 wal_level true",
            "alter database db1 wal_level 1.1",
            "alter database db1 wal_fsync_period -1",
            "alter database db1 wal_fsync_period 180001",
            "alter database db1 wal_fsync_period 10.111",
            "alter database db1 wal_fsync_period true",
        ]

    def test_fsync_err(self):

@ -290,7 +290,7 @@ class TDTestCase:

        # tdSql.execute("use db")

        tdLog.printNoPrefix("==========step4:after wal, all check again ")
        tdLog.printNoPrefix("==========step4:after wal_level, all check again ")
        self.all_test()

    def stop(self):