diff --git a/.gitmodules b/.gitmodules
deleted file mode 100644
index 7d5515f8c5..0000000000
--- a/.gitmodules
+++ /dev/null
@@ -1,12 +0,0 @@
-[submodule "src/connector/go"]
- path = src/connector/go
- url = git@github.com:taosdata/driver-go.git
-[submodule "src/connector/hivemq-tdengine-extension"]
- path = src/connector/hivemq-tdengine-extension
- url = git@github.com:taosdata/hivemq-tdengine-extension.git
-[submodule "deps/TSZ"]
- path = deps/TSZ
- url = https://github.com/taosdata/TSZ.git
-[submodule "examples/rust"]
- path = examples/rust
- url = https://github.com/songtianyi/tdengine-rust-bindings.git
diff --git a/Jenkinsfile2 b/Jenkinsfile2
index 83fa1479dc..4b47d56a6c 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -118,6 +118,7 @@ def pre_test(){
git rm --cached tools/taos-tools 2>/dev/null || :
git rm --cached tools/taosadapter 2>/dev/null || :
git rm --cached tools/taosws-rs 2>/dev/null || :
+ git rm --cached examples/rust 2>/dev/null || :
'''
sh '''
cd ${WKC}
@@ -269,6 +270,7 @@ def pre_test_win(){
git rm --cached tools/taos-tools 2>nul
git rm --cached tools/taosadapter 2>nul
git rm --cached tools/taosws-rs 2>nul
+ git rm --cached examples/rust 2>nul
exit 0
'''
bat '''
@@ -443,7 +445,7 @@ pipeline {
}
}
}
- catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
+ /*catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 15, unit: 'MINUTES'){
script {
sh '''
@@ -455,7 +457,7 @@ pipeline {
'''
}
}
- }
+ }*/
}
}
}
diff --git a/cmake/cmake.options b/cmake/cmake.options
index 8b33353632..090b5d4135 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -47,15 +47,21 @@ IF(${TD_WINDOWS})
)
option(
- BUILD_TEST
- "If build unit tests using googletest"
- ON
- )
+ BUILD_TEST
+ "If build unit tests using googletest"
+ ON
+ )
option(
- TDENGINE_3
- "TDengine 3.x"
- ON
+ TDENGINE_3
+ "TDengine 3.x for taos-tools"
+ ON
+ )
+
+ option(
+ BUILD_CRASHDUMP
+ "If build crashdump on Windows"
+ ON
)
ELSEIF (TD_DARWIN_64)
@@ -84,6 +90,12 @@ ELSE ()
ENDIF ()
ENDIF ()
+option(
+ RUST_BINDINGS
+ "If build with rust-bindings"
+ ON
+ )
+
option(
JEMALLOC_ENABLED
"If build with jemalloc"
diff --git a/cmake/crashdump_CMakeLists.txt.in b/cmake/crashdump_CMakeLists.txt.in
new file mode 100644
index 0000000000..af4b551159
--- /dev/null
+++ b/cmake/crashdump_CMakeLists.txt.in
@@ -0,0 +1,12 @@
+
+# crashdump
+ExternalProject_Add(crashdump
+ GIT_REPOSITORY https://github.com/Arnavion/crashdump.git
+ GIT_TAG master
+ SOURCE_DIR "${TD_CONTRIB_DIR}/crashdump"
+ BINARY_DIR "${TD_CONTRIB_DIR}/crashdump"
+ CONFIGURE_COMMAND ""
+ BUILD_COMMAND ""
+ INSTALL_COMMAND ""
+ TEST_COMMAND ""
+ )
diff --git a/cmake/rust-bindings_CMakeLists.txt.in b/cmake/rust-bindings_CMakeLists.txt.in
new file mode 100644
index 0000000000..d16e86139b
--- /dev/null
+++ b/cmake/rust-bindings_CMakeLists.txt.in
@@ -0,0 +1,12 @@
+
+# rust-bindings
+ExternalProject_Add(rust-bindings
+ GIT_REPOSITORY https://github.com/songtianyi/tdengine-rust-bindings.git
+ GIT_TAG 7ed7a97
+ SOURCE_DIR "${TD_SOURCE_DIR}/examples/rust"
+ BINARY_DIR "${TD_SOURCE_DIR}/examples/rust"
+ CONFIGURE_COMMAND ""
+ BUILD_COMMAND ""
+ INSTALL_COMMAND ""
+ TEST_COMMAND ""
+ )
diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index af3b5af4a6..68af4e7fcb 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -1,5 +1,5 @@
-# zlib
+# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG df8678f
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index d430add979..ea2142f299 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -1,8 +1,8 @@
-# zlib
+# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 2.1.1
+ GIT_TAG 9dc2fec
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in
index 4b7c264472..3c1a7f5e73 100644
--- a/cmake/taosws_CMakeLists.txt.in
+++ b/cmake/taosws_CMakeLists.txt.in
@@ -1,5 +1,5 @@
-# zlib
+# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git
GIT_TAG 9de599d
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index 384cffc08c..b4e8825431 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -27,10 +27,6 @@ else ()
cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()
-if(TD_LINUX_64 AND JEMALLOC_ENABLED)
- cat("${TD_SUPPORT_DIR}/jemalloc_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-endif()
-
# pthread
if(${BUILD_PTHREAD})
cat("${TD_SUPPORT_DIR}/pthread_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -109,6 +105,11 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
+# rust-bindings
+if(${RUST_BINDINGS})
+ cat("${TD_SUPPORT_DIR}/rust-bindings_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+endif(${RUST_BINDINGS})
+
# lucene
if(${BUILD_WITH_LUCENE})
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -120,6 +121,11 @@ if(${BUILD_WITH_NURAFT})
cat("${TD_SUPPORT_DIR}/nuraft_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_NURAFT})
+# crashdump
+if(${BUILD_CRASHDUMP})
+ cat("${TD_SUPPORT_DIR}/crashdump_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
+endif(${BUILD_CRASHDUMP})
+
# addr2line
if(${BUILD_ADDR2LINE})
if(NOT ${TD_WINDOWS})
@@ -134,6 +140,24 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
+
+# clear submodule
+execute_process(COMMAND git submodule deinit -f tools/taos-tools
+ WORKING_DIRECTORY "${TD_SOURCE_DIR}")
+execute_process(COMMAND git rm --cached tools/taos-tools
+ WORKING_DIRECTORY "${TD_SOURCE_DIR}")
+execute_process(COMMAND git submodule deinit -f tools/taosadapter
+ WORKING_DIRECTORY "${TD_SOURCE_DIR}")
+execute_process(COMMAND git rm --cached tools/taosadapter
+ WORKING_DIRECTORY "${TD_SOURCE_DIR}")
+execute_process(COMMAND git submodule deinit -f tools/taosws-rs
+ WORKING_DIRECTORY "${TD_SOURCE_DIR}")
+execute_process(COMMAND git rm --cached tools/taosws-rs
+ WORKING_DIRECTORY "${TD_SOURCE_DIR}")
+execute_process(COMMAND git submodule deinit -f examples/rust
+ WORKING_DIRECTORY "${TD_SOURCE_DIR}")
+execute_process(COMMAND git rm --cached examples/rust
+ WORKING_DIRECTORY "${TD_SOURCE_DIR}")
# ================================================================================================
# Build
@@ -257,6 +281,16 @@ if(${BUILD_PTHREAD})
target_link_libraries(pthread INTERFACE libpthreadVC3)
endif()
+# crashdump
+if(${BUILD_CRASHDUMP})
+ add_executable(dumper "crashdump/dumper/dumper.c")
+ target_link_libraries(dumper User32.lib dbghelp.lib)
+ file(READ "crashdump/crasher/crasher.c" CRASHDUMP_CONTENT)
+ string(REPLACE "main(" "main_crashdump(" CRASHDUMP_CONTENT "${CRASHDUMP_CONTENT}")
+ file(WRITE "crashdump/crasher/crasher.c" "${CRASHDUMP_CONTENT}")
+ add_library(crashdump STATIC "crashdump/crasher/crasher.c")
+endif()
+
# iconv
if(${BUILD_WITH_ICONV})
add_library(iconv STATIC iconv/win_iconv.c)
@@ -403,18 +437,6 @@ if(${BUILD_ADDR2LINE})
endif(NOT ${TD_WINDOWS})
endif(${BUILD_ADDR2LINE})
-# jemalloc
-IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
- include(ExternalProject)
- ExternalProject_Add(jemalloc
- PREFIX "jemalloc"
- SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
- BUILD_IN_SOURCE 1
- CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/
- BUILD_COMMAND ${MAKE}
- )
- INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
-ENDIF ()
# ================================================================================================
# Build test
diff --git a/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
index 18a695cda8..2061961e42 100644
--- a/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
+++ b/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
@@ -23,7 +23,7 @@ A single line of text is used in OpenTSDB line protocol to represent one row of
- `metric` will be used as the STable name.
- `timestamp` is the timestamp of current row of data. The time precision will be determined automatically based on the length of the timestamp. Second and millisecond time precision are supported.
-- `value` is a metric which must be a numeric value, the corresponding column name is "value".
+- `value` is a metric which must be a numeric value, the corresponding column name is "_value".
- The last part is the tag set separated by spaces, all tags will be converted to nchar type automatically.
For example:
@@ -74,7 +74,7 @@ taos> show STables;
Query OK, 2 row(s) in set (0.002544s)
taos> select tbname, * from `meters.current`;
- tbname | ts | value | groupid | location |
+ tbname | _ts | _value | groupid | location |
==================================================================================================================================
t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LoSangeles |
t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LoSangeles |
diff --git a/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
index 3a23944031..a8f3423787 100644
--- a/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
+++ b/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
@@ -91,7 +91,7 @@ taos> show STables;
Query OK, 2 row(s) in set (0.001954s)
taos> select * from `meters.current`;
- ts | value | groupid | location |
+ _ts | _value | groupid | location |
===================================================================================================================
2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
diff --git a/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx
index 2b397e1bdc..1cc402c3c0 100644
--- a/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx
+++ b/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx
@@ -23,7 +23,7 @@ OpenTSDB 行协议同样采用一行字符串来表示一行数据。OpenTSDB
- metric 将作为超级表名。
- timestamp 本行数据对应的时间戳。根据时间戳的长度自动识别时间精度。支持秒和毫秒两种时间精度
-- value 度量值,必须为一个数值。对应的列名也是 “value”。
+- value 度量值,必须为一个数值。对应的列名是 “_value”。
- 最后一部分是标签集, 用空格分隔不同标签, 所有标签自动转化为 nchar 数据类型;
例如:
@@ -74,7 +74,7 @@ taos> show stables;
Query OK, 2 row(s) in set (0.002544s)
taos> select tbname, * from `meters.current`;
- tbname | ts | value | groupid | location |
+ tbname | _ts | _value | groupid | location |
==================================================================================================================================
t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.250 | 11.300000000 | 3 | California.LosAngeles |
diff --git a/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx
index a15f80a585..09cb698fba 100644
--- a/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx
+++ b/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx
@@ -91,7 +91,7 @@ taos> show stables;
Query OK, 2 row(s) in set (0.001954s)
taos> select * from `meters.current`;
- ts | value | groupid | location |
+ _ts | _value | groupid | location |
===================================================================================================================
2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index fefb50c541..845693a98e 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -1,6 +1,6 @@
---
title: 配置参数
-description: 'TDengine 客户端和服务配置列表'
+description: "TDengine 客户端和服务配置列表"
---
## 为服务端指定配置文件
@@ -21,8 +21,6 @@ taosd -C
TDengine 系统的前台交互客户端应用程序为 taos,以及应用驱动,它可以与 taosd 共享同一个配置文件 taos.cfg,也可以使用单独指定配置文件。运行 taos 时,使用参数-c 指定配置文件目录,如 taos -c /home/cfg,表示使用/home/cfg/目录下的 taos.cfg 配置文件中的参数,缺省目录是/etc/taos。更多 taos 的使用方法请见帮助信息 `taos --help`。
-**2.0.10.0 之后版本支持命令行以下参数显示当前客户端参数的配置**
-
```bash
taos -C
```
@@ -47,19 +45,19 @@ taos --dump-config
### firstEp
-| 属性 | 说明 |
-| -------- | --------------------------------------------------------------- |
-| 适用范围 | 服务端和客户端均适用 |
+| 属性 | 说明 |
+| -------- | -------------------------------------------------------------- |
+| 适用范围 | 服务端和客户端均适用 |
| 含义 | taosd 或者 taos 启动时,主动连接的集群中首个 dnode 的 endpoint |
-| 缺省值 | localhost:6030 |
+| 缺省值 | localhost:6030 |
### secondEp
-| 属性 | 说明 |
-| -------- | -------------------------------------------------------------------------------------- |
-| 适用范围 | 服务端和客户端均适用 |
+| 属性 | 说明 |
+| -------- | ------------------------------------------------------------------------------------- |
+| 适用范围 | 服务端和客户端均适用 |
| 含义 | taosd 或者 taos 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint |
-| 缺省值 | 无 |
+| 缺省值 | 无 |
### fqdn
@@ -77,7 +75,6 @@ taos --dump-config
| 适用范围 | 仅服务端适用 |
| 含义 | taosd 启动后,对外服务的端口号 |
| 缺省值 | 6030 |
-| 补充说明 | RESTful 服务在 2.4.0.0 之前(不含)由 taosd 提供,默认端口为 6041; 在 2.4.0.0 及后续版本由 taosAdapter,默认端口为 6041 |
:::note
确保集群中所有主机在端口 6030 上的 TCP 协议能够互通。(详细的端口情况请参见下表)
@@ -87,8 +84,8 @@ taos --dump-config
| TCP | 6030 | 客户端与服务端之间通讯,多节点集群的节点间通讯。 | 由配置文件设置 serverPort 决定。 |
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
| TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 |
-| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 |
-| UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 |
+| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化 |
+| UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化 |
| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
### maxShellConns
@@ -104,28 +101,28 @@ taos --dump-config
### monitor
-| 属性 | 说明 |
-| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
+| 属性 | 说明 |
+| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 适用范围 | 仅服务端适用 |
| 含义 | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括 CPU、内存、硬盘、网络带宽的监控记录,监控信息将通过 HTTP 协议发送给由 `monitorFqdn` 和 `monitorProt` 指定的 TaosKeeper 监控服务 |
-| 取值范围 | 0:关闭监控服务, 1:激活监控服务。 |
-| 缺省值 | 1 |
+| 取值范围 | 0:关闭监控服务, 1:激活监控服务。 |
+| 缺省值 | 1 |
### monitorFqdn
-| 属性 | 说明 |
-| -------- | -------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
+| 属性 | 说明 |
+| -------- | -------------------------- |
+| 适用范围 | 仅服务端适用 |
| 含义 | TaosKeeper 监控服务的 FQDN |
-| 缺省值 | 无 |
+| 缺省值 | 无 |
### monitorPort
-| 属性 | 说明 |
-| -------- | -------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
+| 属性 | 说明 |
+| -------- | --------------------------- |
+| 适用范围 | 仅服务端适用 |
| 含义 | TaosKeeper 监控服务的端口号 |
-| 缺省值 | 6043 |
+| 缺省值 | 6043 |
### monitorInterval
@@ -134,10 +131,9 @@ taos --dump-config
| 适用范围 | 仅服务端适用 |
| 含义 | 监控数据库记录系统参数(CPU/内存)的时间间隔 |
| 单位 | 秒 |
-| 取值范围 | 1-200000 |
+| 取值范围 | 1-200000 |
| 缺省值 | 30 |
-
### telemetryReporting
| 属性 | 说明 |
@@ -149,25 +145,43 @@ taos --dump-config
## 查询相关
-### queryBufferSize
+### queryPolicy
+
+| 属性 | 说明 |
+| -------- | ----------------------------- |
+| 适用范围 | 仅客户端适用 |
+| 含义 | 查询语句的执行策略 |
+| 单位 | 无 |
+| 缺省值 | 1 |
+| 补充说明 | 1: 只使用 vnode,不使用 qnode |
+
+2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行
+
+3: vnode 只运行扫描算子,其余算子均在 qnode 执行
+
+### querySmaOptimize
+
+| 属性 | 说明 |
+| -------- | -------------------- |
+| 适用范围 | 仅客户端适用 |
+| 含义 | sma index 的优化策略 |
+| 单位 | 无 |
+| 缺省值 | 0 |
+| 补充说明 | 见下文 |
+
+0: 表示不使用 sma index,永远从原始数据进行查询
+
+1: 表示使用 sma index,对符合的语句,直接从预计算的结果进行查询
-| 属性 | 说明 |
-| -------- | ------------------------------------------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | 为所有并发查询占用保留的内存大小。 |
-| 单位 | MB |
-| 缺省值 | 无 |
-| 补充说明 | 计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。
(2.0.15 以前的版本中,此参数的单位是字节) |
### maxNumOfDistinctRes
| 属性 | 说明 |
-| -------- | -------------------------------- |
+| -------- | -------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 允许返回的 distinct 结果最大行数 |
| 取值范围 | 默认值为 10 万,最大值 1 亿 |
| 缺省值 | 10 万 |
-| 补充说明 | 2.3 版本新增。 | |
## 区域相关
@@ -306,12 +320,12 @@ charset 的有效值是 UTF-8。
### supportVnodes
-| 属性 | 说明 |
-| -------- | ----------------------------------------------------------------------------------------------------------------------------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | dnode 支持的最大 vnode 数目 |
-| 取值范围 | 0-4096 |
-| 缺省值 | 256 |
+| 属性 | 说明 |
+| -------- | --------------------------- |
+| 适用范围 | 仅服务端适用 |
+| 含义 | dnode 支持的最大 vnode 数目 |
+| 取值范围 | 0-4096 |
+| 缺省值 | 256 |
## 时间相关
@@ -366,7 +380,6 @@ charset 的有效值是 UTF-8。
| 单位 | bytes |
| 取值范围 | 0: 对所有查询结果均进行压缩 >0: 查询结果中任意列大小超过该值的消息才进行压缩 -1: 不压缩 |
| 缺省值 | -1 |
-| 补充说明 | 2.3.0.0 版本新增。 |
## 日志相关
@@ -464,7 +477,7 @@ charset 的有效值是 UTF-8。
| 属性 | 说明 |
| -------- | -------------------- |
| 适用范围 | 服务端和客户端均适用 |
-| 含义 | query 模块的日志开关 |
+| 含义 | query 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |
@@ -481,7 +494,7 @@ charset 的有效值是 UTF-8。
| 属性 | 说明 |
| -------- | -------------------- |
-| 适用范围 | 仅服务端适用 |
+| 适用范围 | 仅服务端适用 |
| 含义 | dnode 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | 135 |
@@ -490,28 +503,28 @@ charset 的有效值是 UTF-8。
| 属性 | 说明 |
| -------- | -------------------- |
-| 适用范围 | 仅服务端适用 |
+| 适用范围 | 仅服务端适用 |
| 含义 | vnode 模块的日志开关 |
| 取值范围 | 同上 |
| 缺省值 | |
### mDebugFlag
-| 属性 | 说明 |
-| -------- | ------------------ |
-| 适用范围 | 仅服务端适用 |
+| 属性 | 说明 |
+| -------- | -------------------- |
+| 适用范围 | 仅服务端适用 |
| 含义 | mnode 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | 135 |
+| 取值范围 | 同上 |
+| 缺省值 | 135 |
### wDebugFlag
-| 属性 | 说明 |
-| -------- | -------------------- |
-| 适用范围 | 仅服务端适用 |
-| 含义 | wal 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | 135 |
+| 属性 | 说明 |
+| -------- | ------------------ |
+| 适用范围 | 仅服务端适用 |
+| 含义 | wal 模块的日志开关 |
+| 取值范围 | 同上 |
+| 缺省值 | 135 |
### sDebugFlag
@@ -533,57 +546,86 @@ charset 的有效值是 UTF-8。
### tqDebugFlag
-| 属性 | 说明 |
-| -------- | ------------------- |
-| 适用范围 | 仅服务端适用 |
+| 属性 | 说明 |
+| -------- | ----------------- |
+| 适用范围 | 仅服务端适用 |
| 含义 | tq 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
+| 取值范围 | 同上 |
+| 缺省值 | |
### fsDebugFlag
-| 属性 | 说明 |
-| -------- | ------------------- |
-| 适用范围 | 仅服务端适用 |
+| 属性 | 说明 |
+| -------- | ----------------- |
+| 适用范围 | 仅服务端适用 |
| 含义 | fs 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
+| 取值范围 | 同上 |
+| 缺省值 | |
### udfDebugFlag
-| 属性 | 说明 |
-| -------- | ---------------------- |
-| 适用范围 | 仅服务端适用 |
+| 属性 | 说明 |
+| -------- | ------------------ |
+| 适用范围 | 仅服务端适用 |
| 含义 | UDF 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
+| 取值范围 | 同上 |
+| 缺省值 | |
### smaDebugFlag
-| 属性 | 说明 |
-| -------- | ---------------------- |
-| 适用范围 | 仅服务端适用 |
+| 属性 | 说明 |
+| -------- | ------------------ |
+| 适用范围 | 仅服务端适用 |
| 含义 | sma 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
+| 取值范围 | 同上 |
+| 缺省值 | |
### idxDebugFlag
-| 属性 | 说明 |
-| -------- | ---------------------- |
-| 适用范围 | 仅服务端适用 |
+| 属性 | 说明 |
+| -------- | -------------------- |
+| 适用范围 | 仅服务端适用 |
| 含义 | index 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
+| 取值范围 | 同上 |
+| 缺省值 | |
### tdbDebugFlag
-| 属性 | 说明 |
-| -------- | ---------------------- |
-| 适用范围 | 仅服务端适用 |
+| 属性 | 说明 |
+| -------- | ------------------ |
+| 适用范围 | 仅服务端适用 |
| 含义 | tdb 模块的日志开关 |
-| 取值范围 | 同上 |
-| 缺省值 | |
+| 取值范围 | 同上 |
+| 缺省值 | |
+
+## Schemaless 相关
+
+### smlChildTableName
+
+| 属性 | 说明 |
+| -------- | ------------------------- |
+| 适用范围 | 仅客户端适用 |
+| 含义 | schemaless 自定义的子表名 |
+| 类型 | 字符串 |
+| 缺省值 | 无 |
+
+### smlTagName
+
+| 属性 | 说明 |
+| -------- | ------------------------------------ |
+| 适用范围 | 仅客户端适用 |
+| 含义 | schemaless tag 为空时默认的 tag 名字 |
+| 类型 | 字符串 |
+| 缺省值 | _tag_null |
+
+### smlDataFormat
+
+| 属性 | 说明 |
+| -------- | ----------------------------- |
+| 适用范围 | 仅客户端适用 |
+| 含义 | schemaless 列数据是否顺序一致 |
+| 取值范围 | 0:不一致;1: 一致 |
+| 缺省值 | 1 |
## 其他
@@ -596,3 +638,12 @@ charset 的有效值是 UTF-8。
| 取值范围 | 0:否,1:是 |
| 缺省值 | 1 |
| 补充说明 | 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下
2、手动启动,就在 taosd 执行目录下。 |
+
+### udf
+
+| 属性 | 说明 |
+| -------- | ------------------ |
+| 适用范围 | 仅服务端适用 |
+| 含义 | 是否启动 udf 服务 |
+| 取值范围 | 0: 不启动;1:启动 |
+| 缺省值 | 1 |
diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md
index 507ccea629..e60debc87f 100644
--- a/docs/zh/21-tdinternal/01-arch.md
+++ b/docs/zh/21-tdinternal/01-arch.md
@@ -5,7 +5,7 @@ title: 整体架构
## 集群与基本逻辑单元
-TDengine 的设计是基于单个硬件、软件系统不可靠,基于任何单台计算机都无法提供足够计算能力和存储能力处理海量数据的假设进行设计的。因此 TDengine 从研发的第一天起,就按照分布式高可靠架构进行设计,是支持水平扩展的,这样任何单台或多台服务器发生硬件故障或软件错误都不影响系统的可用性和可靠性。同时,通过节点虚拟化并辅以自动化负载均衡技术,TDengine 能最高效率地利用异构集群中的计算和存储资源降低硬件投资。
+TDengine 的设计是基于单个硬件、软件系统不可靠,基于任何单台计算机都无法提供足够计算能力和存储能力处理海量数据的假设进行设计的。因此 TDengine 从研发的第一天起,就按照分布式高可靠架构进行设计,是支持水平扩展的,这样任何单台或多台服务器发生硬件故障或软件错误都不影响系统的可用性和可靠性。同时,通过节点虚拟化并辅以负载均衡技术,TDengine 能最高效率地利用异构集群中的计算和存储资源降低硬件投资。
### 主要逻辑单元
@@ -19,45 +19,43 @@ TDengine 分布式架构的逻辑结构图如下:
**物理节点(pnode):** pnode 是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有 OS 的物理机、虚拟机或 Docker 容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine 完全依赖 FQDN 来进行网络通讯,如果不了解 FQDN,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。
-**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode 包含零到多个逻辑的虚拟节点(vnode),零或者至多一个逻辑的管理节点(mnode)。dnode 在系统中的唯一标识由实例的 End Point(EP)决定。EP 是 dnode 所在物理节点的 FQDN(Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。
+**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode 包含零到多个逻辑的虚拟节点(vnode),零或者至多一个逻辑的管理节点(mnode),零或者至多一个逻辑的弹性计算节点(qnode),零或者至多一个逻辑的流计算节点(snode)。dnode 在系统中的唯一标识由实例的 End Point(EP)决定。EP 是 dnode 所在物理节点的 FQDN(Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。
-**虚拟节点(vnode):** 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode,图中 V2,V3,V4 等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一张新表时,系统会检查是否需要创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个 DB,但一个 DB 可以有多个 vnode。一个 vnode 除存储的时序数据外,也保存有所包含的表的 schema、标签值等。一个虚拟节点由所属的数据节点的 EP,以及所属的 VGroup ID 在系统内唯一标识,由管理节点创建并管理。
+**虚拟节点(vnode):** 为更好的支持数据分片、负载均衡,防止数据过热或倾斜,数据节点被虚拟化成多个虚拟节点(vnode,图中 V2,V3,V4 等)。每个 vnode 都是一个相对独立的工作单元,是时序数据存储的基本单元,具有独立的运行线程、内存空间与持久化存储的路径。一个 vnode 包含一定数量的表(数据采集点)。当创建一个新 DB 时,系统会创建新的 vnode。一个数据节点上能创建的 vnode 的数量取决于该数据节点所在物理节点的硬件资源。一个 vnode 只属于一个 DB,但一个 DB 可以有多个 vnode。一个 vnode 除存储的时序数据外,也保存有所包含的表的 schema、标签值等。一个虚拟节点由所属的数据节点的 EP,以及所属的 VGroup ID 在系统内唯一标识,由管理节点创建并管理。
-**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、表、静态标签等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M0,M1,M2)。mnode 间采用 master/slave 的机制进行管理,而且采取强一致方式进行数据同步,任何数据更新操作只能在 Master 上进行。mnode 集群的创建由系统自动完成,无需人工干预。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。
+**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、超级表等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M1,M2,M3)。mnode 支持多副本,采用 RAFT 一致性协议,保证系统的高可用与高可靠,任何数据更新操作只能在 Leader 上进行。mnode 集群的第一个节点在集群部署时自动完成,其他节点的创建与删除由用户通过 SQL 命令完成。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。
-**虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vgroup)来保证系统的高可靠。虚拟节点组内采取 master/slave 的方式进行管理。写操作只能在 master vnode 上进行,系统采用异步复制的方式将数据同步到 slave vnode,这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个 DB 的副本数为 N,系统必须有至少 N 数据节点。副本数在创建 DB 时通过参数 replica 可以指定,缺省为 1。使用 TDengine 的多副本特性,可以不再需要昂贵的磁盘阵列等存储设备,就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理,并且由管理节点分配一个系统唯一的 ID,VGroup ID。如果两个虚拟节点的 VGroup ID 相同,说明他们属于同一个组,数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的,容许只有一个,也就是没有数据复制。VGroup ID 是永远不变的,即使一个虚拟节点组被删除,它的 ID 也不会被收回重复利用。
+**弹性计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。
-**Taosc** taosc 是 TDengine 给应用提供的驱动程序(driver),负责处理应用与集群的接口交互,提供 C/C++ 语言原生接口,内嵌于 JDBC、C#、Python、Go、Node.js 语言连接库里。应用都是通过 taosc 而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据;将插入、查询等请求转发到正确的数据节点;在把结果返回给应用时,还需要负责最后一级的聚合、排序、过滤等操作。对于 JDBC、C/C++、C#、Python、Go、Node.js 接口而言,这个模块是在应用所处的物理节点上运行。同时,为支持全分布式的 RESTful 接口,taosc 在 TDengine 集群的每个 dnode 上都有一运行实例。
+**流计算节点(snode):** 一个虚拟的逻辑单元,只运行流计算任务(图中 S)。集群中可配置多个 snode,在整个集群内部共享使用(图中 S1,S2,S3)。snode 不与具体的 stream 绑定,即一个 snode 可以同时执行多个 stream 的计算任务。每个 dnode 上至多有一个 snode,由所属的数据节点的 EP 来唯一标识。由 mnode 调度可用的 snode 完成流计算任务,当没有可用的 snode 时,流计算任务在 vnode 中执行。
+
+**虚拟节点组(VGroup):** 不同数据节点上的 vnode 可以组成一个虚拟节点组(vgroup),采用 RAFT 一致性协议,保证系统的高可用与高可靠。写操作只能在 leader vnode 上进行,系统采用异步复制的方式将数据同步到 follower vnode,这样确保了一份数据在多个物理节点上有拷贝。一个 vgroup 里虚拟节点个数就是数据的副本数。如果一个 DB 的副本数为 N,系统必须有至少 N 数据节点。副本数在创建 DB 时通过参数 replica 可以指定,缺省为 1。使用 TDengine 的多副本特性,可以不再需要昂贵的磁盘阵列等存储设备,就可以获得同样的数据高可靠性。虚拟节点组由管理节点创建、管理,并且由管理节点分配一个系统唯一的 ID,VGroup ID。如果两个虚拟节点的 VGroup ID 相同,说明他们属于同一个组,数据互为备份。虚拟节点组里虚拟节点的个数是可以动态改变的,容许只有一个,也就是没有数据复制。VGroup ID 是永远不变的,即使一个虚拟节点组被删除,它的 ID 也不会被收回重复利用。
+
+**Taosc** taosc 是 TDengine 给应用提供的驱动程序(driver),负责处理应用与集群的接口交互,提供 C/C++ 语言原生接口,内嵌于 JDBC、C#、Python、Go、Node.js 语言连接库里。应用都是通过 taosc 而不是直接连接集群中的数据节点与整个集群进行交互的。这个模块负责获取并缓存元数据;将插入、查询等请求转发到正确的数据节点;在把结果返回给应用时,还需要负责最后一级的聚合、排序、过滤等操作。对于 JDBC、C/C++、C#、Python、Go、Node.js 接口而言,这个模块是在应用所处的物理节点上运行。同时,taosc 也可以与 taosAdapter 交互,支持全分布式的 RESTful 接口。
### 节点之间的通讯
-**通讯方式:**TDengine 系统的各个数据节点之间,以及应用驱动与各数据节点之间的通讯是通过 TCP/UDP 进行的。因为考虑到物联网场景,数据写入的包一般不大,因此 TDengine 除采用 TCP 做传输之外,还采用 UDP 方式,因为 UDP 更加高效,而且不受连接数的限制。TDengine 实现了自己的超时、重传、确认等机制,以确保 UDP 的可靠传输。对于数据量不到 15K 的数据包,采取 UDP 的方式进行传输,超过 15K 的,或者是查询类的操作,自动采取 TCP 的方式进行传输。同时,TDengine 根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。对于数据节点之间的数据复制,只采用 TCP 方式进行数据传输。
+**通讯方式:**TDengine 系统的各个数据节点之间,以及应用驱动与各数据节点之间的通讯是通过 TCP 进行的。TDengine 根据配置和数据包,会自动对数据进行压缩/解压缩,数字签名/认证等处理。
**FQDN 配置:**一个数据节点有一个或多个 FQDN,可以在系统配置文件 taos.cfg 通过参数“fqdn”进行指定,如果没有指定,系统将自动获取计算机的 hostname 作为其 FQDN。如果节点没有配置 FQDN,可以直接将该节点的配置参数 fqdn 设置为它的 IP 地址。但不建议使用 IP,因为 IP 地址可变,一旦变化,将让集群无法正常工作。一个数据节点的 EP(End Point)由 FQDN + Port 组成。采用 FQDN,需要保证 DNS 服务正常工作,或者在节点以及应用所在的节点配置好 hosts 文件。另外,这个参数值的长度需要控制在 96 个字符以内。
-**端口配置:**一个数据节点对外的端口由 TDengine 的系统配置参数 serverPort 决定,对集群内部通讯的端口是 serverPort+5。为支持多线程高效的处理 UDP 数据,每个对内和对外的 UDP 连接,都需要占用 5 个连续的端口。
-
-- 集群内数据节点之间的数据复制操作占用一个 TCP 端口,是 serverPort+10。
-- 集群数据节点对外提供 RESTful 服务占用一个 TCP 端口,是 serverPort+11。
-- 集群内数据节点与 Arbitrator 节点之间通讯占用一个 TCP 端口,是 serverPort+12。
-
-因此一个数据节点总的端口范围为 serverPort 到 serverPort+12,总共 13 个 TCP/UDP 端口。确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。详细的端口情况请参见 [TDengine 2.0 端口说明](/train-faq/faq#port)
+**端口配置:**一个数据节点对外的端口由 TDengine 的系统配置参数 serverPort 决定,默认为 6030。
**集群对外连接:**TDengine 集群可以容纳单个、多个甚至几千个数据节点。应用只需要向集群中任何一个数据节点发起连接即可,连接需要提供的网络参数是一数据节点的 End Point(FQDN 加配置的端口号)。通过命令行 CLI 启动应用 taos 时,可以通过选项-h 来指定数据节点的 FQDN,-P 来指定其配置的端口号,如果端口不配置,将采用 TDengine 的系统配置参数 serverPort。
-**集群内部通讯:**各个数据节点之间通过 TCP/UDP 进行连接。一个数据节点启动时,将获取 mnode 所在的 dnode 的 EP 信息,然后与系统中的 mnode 建立起连接,交换信息。获取 mnode 的 EP 信息有三步:
+**集群内部通讯:**各个数据节点之间通过 TCP 进行连接。一个数据节点启动时,将获取 mnode 所在的 dnode 的 EP 信息,然后与系统中的 mnode 建立起连接,交换信息。获取 mnode 的 EP 信息有三步:
-1. 检查 mnodeEpSet.json 文件是否存在,如果不存在或不能正常打开获得 mnode EP 信息,进入第二步;
+1. 检查 dnode.json 文件是否存在,如果不存在或不能正常打开获得 mnode EP 信息,进入第二步;
2. 检查系统配置文件 taos.cfg,获取节点配置参数 firstEp、secondEp(这两个参数指定的节点可以是不带 mnode 的普通节点,这样的话,节点被连接时会尝试重定向到 mnode 节点),如果不存在或者 taos.cfg 里没有这两个配置参数,或无效,进入第三步;
3. 将自己的 EP 设为 mnode EP,并独立运行起来。
获取 mnode EP 列表后,数据节点发起连接,如果连接成功,则成功加入进工作的集群,如果不成功,则尝试 mnode EP 列表中的下一个。如果都尝试了,但连接都仍然失败,则休眠几秒后,再进行尝试。
-**Mnode 的选择:**TDengine 逻辑上有管理节点,但没有单独的执行代码,服务器侧只有一套执行代码 taosd。那么哪个数据节点会是管理节点呢?这是系统自动决定的,无需任何人工干预。原则如下:一个数据节点启动时,会检查自己的 End Point,并与获取的 mnode EP List 进行比对,如果在其中,该数据节点认为自己应该启动 mnode 模块,成为 mnode。如果自己的 EP 不在 mnode EP List 里,则不启动 mnode 模块。在系统的运行过程中,由于负载均衡、宕机等原因,mnode 有可能迁移至新的 dnode,但一切都是透明的,无需人工干预,配置参数的修改,是 mnode 自己根据资源做出的决定。
+**Mnode 的选择:**TDengine 逻辑上有管理节点,但没有单独的执行代码,服务器侧只有一套执行代码 taosd。那么哪个数据节点会是管理节点呢?在集群部署时,第一个数据节点自动成为管理节点。集群中的其他管理节点的创建与删除,由用户通过 SQL 语句完成。
**新数据节点的加入:**系统有了一个数据节点后,就已经成为一个工作的系统。添加新的节点进集群时,有两个步骤,第一步:使用 TDengine CLI 连接到现有工作的数据节点,然后用命令“CREATE DNODE”将新的数据节点的 End Point 添加进去;第二步:在新的数据节点的系统配置参数文件 taos.cfg 里,将 firstEp,secondEp 参数设置为现有集群中任意两个数据节点的 EP 即可。具体添加的详细步骤请见详细的用户手册。这样就把集群一步一步的建立起来。
-**重定向:**无论是 dnode 还是 taosc,最先都是要发起与 mnode 的连接,但 mnode 是系统自动创建并维护的,因此对于用户来说,并不知道哪个 dnode 在运行 mnode。TDengine 只要求向系统中任何一个工作的 dnode 发起连接即可。因为任何一个正在运行的 dnode,都维护有目前运行的 mnode EP List。当收到一个来自新启动的 dnode 或 taosc 的连接请求,如果自己不是 mnode,则将 mnode EP List 回复给对方,taosc 或新启动的 dnode 收到这个 list,就重新尝试建立连接。当 mnode EP List 发生改变,通过节点之间的消息交互,各个数据节点就很快获取最新列表,并通知 taosc。
+**重定向:**无论是 dnode 还是 taosc,最先都是要发起与 mnode 的连接,由于 mnode 是可以动态调整的,所以对于用户来说,并不知道哪个 dnode 在运行 mnode。TDengine 只要求向系统中任何一个工作的 dnode 发起连接即可。因为任何一个正在运行的 dnode,都维护有目前运行的 mnode EP List。当收到一个来自新启动的 dnode 或 taosc 的连接请求,如果自己不是 mnode,则将 mnode EP List 回复给对方,taosc 或新启动的 dnode 收到这个 list,就重新尝试建立连接。当 mnode EP List 发生改变,通过节点之间的消息交互,各个数据节点就很快获取最新列表,并通知 taosc。
### 一个典型的消息流程
@@ -68,15 +66,17 @@ TDengine 分布式架构的逻辑结构图如下:
图 2 TDengine 典型的操作流程
1. 应用通过 JDBC 或其他 API 接口发起插入数据的请求。
-2. taosc 会检查缓存,看是否保存有该表的 meta data。如果有,直接到第 4 步。如果没有,taosc 将向 mnode 发出 get meta-data 请求。
-3. mnode 将该表的 meta-data 返回给 taosc。Meta-data 包含有该表的 schema,而且还有该表所属的 vgroup 信息(vnode ID 以及所在的 dnode 的 End Point,如果副本数为 N,就有 N 组 End Point)。如果 taosc 迟迟得不到 mnode 回应,而且存在多个 mnode,taosc 将向下一个 mnode 发出请求。
-4. taosc 向 master vnode 发起插入请求。
-5. vnode 插入数据后,给 taosc 一个应答,表示插入成功。如果 taosc 迟迟得不到 vnode 的回应,taosc 会认为该节点已经离线。这种情况下,如果被插入的数据库有多个副本,taosc 将向 vgroup 里下一个 vnode 发出插入请求。
-6. taosc 通知 APP,写入成功。
+2. taosc 会检查缓存,看是否保存有该表所在数据库的 vgroup-info 信息。如果有,直接到第 4 步。如果没有,taosc 将向 mnode 发出 get vgroup-info 请求。
+3. mnode 将该表所在数据库的 vgroup-info 返回给 taosc。Vgroup-info 包含数据库的 vgroup 分布信息(vnode ID 以及所在的 dnode 的 End Point,如果副本数为 N,就有 N 组 End Point),还包含每个 vgroup 中存储数据表的 hash 范围。如果 taosc 迟迟得不到 mnode 回应,而且存在多个 mnode,taosc 将向下一个 mnode 发出请求。
+4. taosc 会继续检查缓存,看是否保存有该表的 meta-data。如果有,直接到第 6 步。如果没有,taosc 将向 vnode 发出 get meta-data 请求。
+5. vnode 将该表的 meta-data 返回给 taosc。Meta-data 包含有该表的 schema。
+6. taosc 向 leader vnode 发起插入请求。
+7. vnode 插入数据后,给 taosc 一个应答,表示插入成功。如果 taosc 迟迟得不到 vnode 的回应,taosc 会认为该节点已经离线。这种情况下,如果被插入的数据库有多个副本,taosc 将向 vgroup 里下一个 vnode 发出插入请求。
+8. taosc 通知 APP,写入成功。
-对于第二和第三步,taosc 启动时,并不知道 mnode 的 End Point,因此会直接向配置的集群对外服务的 End Point 发起请求。如果接收到该请求的 dnode 并没有配置 mnode,该 dnode 会在回复的消息中告知 mnode EP 列表,这样 taosc 会重新向新的 mnode 的 EP 发出获取 meta-data 的请求。
+对于第二步,taosc 启动时,并不知道 mnode 的 End Point,因此会直接向配置的集群对外服务的 End Point 发起请求。如果接收到该请求的 dnode 并没有配置 mnode,该 dnode 会在回复的消息中告知 mnode EP 列表,这样 taosc 会重新向新的 mnode 的 EP 发出获取 meta-data 的请求。
-对于第四和第五步,没有缓存的情况下,taosc 无法知道虚拟节点组里谁是 master,就假设第一个 vnodeID 就是 master,向它发出请求。如果接收到请求的 vnode 并不是 master,它会在回复中告知谁是 master,这样 taosc 就向建议的 master vnode 发出请求。一旦得到插入成功的回复,taosc 会缓存 master 节点的信息。
+对于第四和第六步,没有缓存的情况下,taosc 无法知道虚拟节点组里谁是 leader,就假设第一个 vnodeID 就是 leader,向它发出请求。如果接收到请求的 vnode 并不是 leader,它会在回复中告知谁是 leader,这样 taosc 就向建议的 leader vnode 发出请求。一旦得到插入成功的回复,taosc 会缓存 leader 节点的信息。
上述是插入数据的流程,查询、计算的流程也完全一致。taosc 把这些复杂的流程全部封装屏蔽了,对于应用来说无感知也无需任何特别处理。
@@ -89,13 +89,13 @@ TDengine 分布式架构的逻辑结构图如下:
TDengine 存储的数据包括采集的时序数据以及库、表相关的元数据、标签数据等,这些数据具体分为三部分:
- 时序数据:存放于 vnode 里,由 data、head 和 last 三个文件组成,数据量大,查询量取决于应用场景。容许乱序写入,但暂时不支持删除操作,并且仅在 update 参数设置为 1 时允许更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。
-- 标签数据:存放于 vnode 里的 meta 文件,支持增删改查四个标准操作。数据量不大,有 N 张表,就有 N 条记录,因此可以全内存存储。如果标签过滤操作很多,查询将十分频繁,因此 TDengine 支持多核多线程并发查询。只要计算资源足够,即使有数千万张表,过滤结果能毫秒级返回。
-- 元数据:存放于 mnode 里,包含系统节点、用户、DB、Table Schema 等信息,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。
+- 数据表元数据:包含标签信息和 Table Schema 信息,存放于 vnode 里的 meta 文件,支持增删改查四个标准操作。数据量很大,有 N 张表,就有 N 条记录,因此采用 LRU 存储,支持标签数据的索引。TDengine 支持多核多线程并发查询。只要内存资源足够,元数据全内存存储,千万级别规模的标签数据过滤结果能毫秒级返回。在内存资源不足的情况下,仍然可以支持数千万张表的快速查询。
+- 数据库元数据:存放于 mnode 里,包含系统节点、用户、DB、STable Schema 等信息,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。
与典型的 NoSQL 存储模型相比,TDengine 将标签数据与时序数据完全分离存储,它具有两大优势:
- 能够极大地降低标签数据存储的冗余度:一般的 NoSQL 数据库或时序数据库,采用的 K-V 存储,其中的 Key 包含时间戳、设备 ID、各种标签。每条记录都带有这些重复的内容,浪费存储空间。而且如果应用要在历史数据上增加、修改或删除标签,需要遍历数据,重写一遍,操作成本极其昂贵。
-- 能够实现极为高效的多表之间的聚合查询:做多表之间聚合查询时,先把符合标签过滤条件的表查找出来,然后再查找这些表相应的数据块,这样大幅减少要扫描的数据集,从而大幅提高查询效率。而且标签数据采用全内存的结构进行管理和维护,千万级别规模的标签数据查询可以在毫秒级别返回。
+- 能够实现极为高效的多表之间的聚合查询:做多表之间聚合查询时,先把符合标签过滤条件的表查找出来,然后再查找这些表相应的数据块,这样大幅减少要扫描的数据集,从而大幅提高查询效率。
### 数据分片
@@ -103,11 +103,11 @@ TDengine 存储的数据包括采集的时序数据以及库、表相关的元
vnode(虚拟数据节点)负责为采集的时序数据提供写入、查询和计算功能。为便于负载均衡、数据恢复、支持异构环境,TDengine 将一个数据节点根据其计算和存储资源切分为多个 vnode。这些 vnode 的管理是 TDengine 自动完成的,对应用完全透明。
-对于单独一个数据采集点,无论其数据量多大,一个 vnode(或 vgroup,如果副本数大于 1)有足够的计算资源和存储资源来处理(如果每秒生成一条 16 字节的记录,一年产生的原始数据不到 0.5G),因此 TDengine 将一张表(一个数据采集点)的所有数据都存放在一个 vnode 里,而不会让同一个采集点的数据分布到两个或多个 dnode 上。而且一个 vnode 可存储多个数据采集点(表)的数据,一个 vnode 可容纳的表的数目的上限为一百万。设计上,一个 vnode 里所有的表都属于同一个 DB。一个数据节点上,除非特殊配置,一个 DB 拥有的 vnode 数目不会超过系统核的数目。
+对于单独一个数据采集点,无论其数据量多大,一个 vnode(或 vgroup,如果副本数大于 1)有足够的计算资源和存储资源来处理(如果每秒生成一条 16 字节的记录,一年产生的原始数据不到 0.5G),因此 TDengine 将一张表(一个数据采集点)的所有数据都存放在一个 vnode 里,而不会让同一个采集点的数据分布到两个或多个 dnode 上。而且一个 vnode 可存储多个数据采集点(表)的数据,一个 vnode 可容纳的表的数目的上限为一百万。设计上,一个 vnode 里所有的表都属于同一个 DB。
-创建 DB 时,系统并不会马上分配资源。但当创建一张表时,系统将看是否有已经分配的 vnode,且该 vnode 是否有空余的表空间,如果有,立即在该有空位的 vnode 创建表。如果没有,系统将从集群中,根据当前的负载情况,在一个 dnode 上创建一新的 vnode,然后创建表。如果 DB 有多个副本,系统不是只创建一个 vnode,而是一个 vgroup(虚拟数据节点组)。系统对 vnode 的数目没有任何限制,仅仅受限于物理节点本身的计算和存储资源。
+TDengine 3.0 采用 hash 一致性算法,确定每张数据表所在的 vnode。创建 DB 时,系统会立刻分配指定数目的 vnode,并确定每个 vnode 所负责的数据表范围。当创建一张表时,系统根据数据表名计算出所在的 vnodeID,立即在该 vnode 创建表。如果 DB 有多个副本,系统不是只创建一个 vnode,而是一个 vgroup(虚拟数据节点组)。系统对 vnode 的数目没有任何限制,仅仅受限于物理节点本身的计算和存储资源。
-每张表的 meta data(包含 schema,标签等)也存放于 vnode 里,而不是集中存放于 mnode,实际上这是对 Meta 数据的分片,这样便于高效并行的进行标签过滤操作。
+每张表的 meta data(包含 schema,标签等)也存放于 vnode 里,而不是集中存放于 mnode,实际上这是对 meta 数据的分片,这样便于高效并行的进行标签过滤操作。
### 数据分区
@@ -117,77 +117,68 @@ TDengine 除 vnode 分片之外,还对时序数据按照时间段进行分区
### 负载均衡
-每个 dnode 都定时向 mnode(虚拟管理节点)报告其状态(包括硬盘空间、内存大小、CPU、网络、虚拟节点个数等),因此 mnode 了解整个集群的状态。基于整体状态,当 mnode 发现某个 dnode 负载过重,它会将 dnode 上的一个或多个 vnode 挪到其他 dnode。在挪动过程中,对外服务继续进行,数据插入、查询和计算操作都不受影响。
+每个 dnode 都定时向 mnode(虚拟管理节点)报告其状态(包括硬盘空间、内存大小、CPU、网络、虚拟节点个数等),因此 mnode 了解整个集群的状态。基于整体状态,当 mnode 发现某个 dnode 负载过重,它会将 dnode 上的一个或多个 vnode 挪到其他 dnode。在挪动过程中,对外服务继续进行,数据插入、查询和计算操作都不受影响。负载均衡的触发时间,由用户指定。
-如果 mnode 一段时间没有收到 dnode 的状态报告,mnode 会认为这个 dnode 已经离线。如果离线时间超过一定时长(时长由配置参数 offlineThreshold 决定),该 dnode 将被 mnode 强制剔除出集群。该 dnode 上的 vnodes 如果副本数大于 1,系统将自动在其他 dnode 上创建新的副本,以保证数据的副本数。如果该 dnode 上还有 mnode,而且 mnode 的副本数大于 1,系统也将自动在其他 dnode 上创建新的 mnode,以保证 mnode 的副本数。
-
-当新的数据节点被添加进集群,因为新的计算和存储被添加进来,系统也将自动启动负载均衡流程。
-
-负载均衡过程无需任何人工干预,应用也无需重启,将自动连接新的节点,完全透明。
+当新的数据节点被添加进集群,因为新的计算和存储被添加进来,用户需要手动触发负载均衡流程,使得系统在最优的情况下运行。
**提示:负载均衡由参数 balance 控制,决定开启/关闭自动负载均衡。**
## 数据写入与复制流程
-如果一个数据库有 N 个副本,那一个虚拟节点组就有 N 个虚拟节点,但是只有一个是 master,其他都是 slave。当应用将新的记录写入系统时,只有 master vnode 能接受写的请求。如果 slave vnode 收到写的请求,系统将通知 taosc 需要重新定向。
+如果一个数据库有 N 个副本,那一个虚拟节点组就有 N 个虚拟节点,但是只有一个是 leader,其他都是 follower。当应用将新的记录写入系统时,只有 leader vnode 能接受写的请求。如果 follower vnode 收到写的请求,系统将通知 taosc 需要重新定向。
-### Master Vnode 写入流程
+### Leader Vnode 写入流程
-Master Vnode 遵循下面的写入流程:
+Leader Vnode 遵循下面的写入流程:
-
+
- 图 3 TDengine Master 写入流程
+ 图 3 TDengine Leader 写入流程
-1. master vnode 收到应用的数据插入请求,验证 OK,进入下一步;
-2. 如果系统配置参数 walLevel 大于 0,vnode 将把该请求的原始数据包写入数据库日志文件 WAL。如果 walLevel 设置为 2,而且 fsync 设置为 0,TDengine 还将 WAL 数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失;
-3. 如果有多个副本,vnode 将把数据包转发给同一虚拟节点组内的 slave vnodes,该转发包带有数据的版本号(version);
-4. 写入内存,并将记录加入到 skip list;
-5. master vnode 返回确认信息给应用,表示写入成功;
-6. 如果第 2、3、4 步中任何一步失败,将直接返回错误给应用。
+1. leader vnode 收到应用的数据插入请求,验证 OK,进入下一步;
+2. vnode 将该请求的原始数据包写入数据库日志文件 WAL。如果 walLevel 设置为 2,而且 fsync 设置为 0,TDengine 还将 WAL 数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失;
+3. 如果有多个副本,vnode 将把数据包转发给同一虚拟节点组内的 follower vnodes,该转发包带有数据的版本号(version);
+4. 写入内存,并将记录加入到 skip list。但如果未达成一致,会触发回滚操作;
+5. leader vnode 返回确认信息给应用,表示写入成功;
+6. 如果第 2、3、4 步中任何一步失败,将直接返回错误给应用。
-### Slave Vnode 写入流程
+### Follower Vnode 写入流程
-对于 slave vnode,写入流程是:
+对于 follower vnode,写入流程是:
-
+
- 图 4 TDengine Slave 写入流程
+ 图 4 TDengine Follower 写入流程
-1. slave vnode 收到 Master vnode 转发了的数据插入请求。检查 last version 是否与 master 一致,如果一致,进入下一步。如果不一致,需要进入同步状态。
-2. 如果系统配置参数 walLevel 大于 0,vnode 将把该请求的原始数据包写入数据库日志文件 WAL。如果 walLevel 设置为 2,而且 fsync 设置为 0,TDengine 还将 WAL 数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失。
+1. follower vnode 收到 leader vnode 转发的数据插入请求。
+2. vnode 将把该请求的原始数据包写入数据库日志文件 WAL。如果 walLevel 设置为 2,而且 fsync 设置为 0,TDengine 还将 WAL 数据立即落盘,以保证即使宕机,也能从数据库日志文件中恢复数据,避免数据的丢失。
3. 写入内存,更新内存中的 skip list。
-与 master vnode 相比,slave vnode 不存在转发环节,也不存在回复确认环节,少了两步。但写内存与 WAL 是完全一样的。
+与 leader vnode 相比,follower vnode 不存在转发环节,也不存在回复确认环节,少了两步。但写内存与 WAL 是完全一样的。
### 主从选择
Vnode 会保持一个数据版本号(version),对内存数据进行持久化存储时,对该版本号也进行持久化存储。每个数据更新操作,无论是采集的时序数据还是元数据,这个版本号将增加 1。
-一个 vnode 启动时,角色(master、slave)是不定的,数据是处于未同步状态,它需要与虚拟节点组内其他节点建立 TCP 连接,并互相交换 status,其中包括 version 和自己的角色。通过 status 的交换,系统进入选主流程,规则如下:
+一个 vnode 启动时,角色(leader、follower)是不定的,数据是处于未同步状态,它需要与虚拟节点组内其他节点建立 TCP 连接,并互相交换 status,按照标准的 raft 一致性算法完成选主。
-1. 如果只有一个副本,该副本永远就是 master
-2. 所有副本都在线时,版本最高的被选为 master
-3. 在线的虚拟节点数过半,而且有虚拟节点是 slave 的话,该虚拟节点自动成为 master
-4. 对于 2 和 3,如果多个虚拟节点满足成为 master 的要求,那么虚拟节点组的节点列表里,最前面的选为 master
-
-更多的关于数据复制的流程,请见[《TDengine 2.0 数据复制模块设计》](/tdinternal/replica/)。
+更多的关于数据复制的流程,请见[《TDengine 3.0 数据复制模块设计》](/tdinternal/replica/)。
### 同步复制
-对于数据一致性要求更高的场景,异步数据复制无法满足要求,因为有极小的概率丢失数据,因此 TDengine 提供同步复制的机制供用户选择。在创建数据库时,除指定副本数 replica 之外,用户还需要指定新的参数 quorum。如果 quorum 大于 1,它表示每次 master 转发给副本时,需要等待 quorum-1 个回复确认,才能通知应用,数据在 slave 已经写入成功。如果在一定的时间内,得不到 quorum-1 个回复确认,master vnode 将返回错误给应用。
+对于数据一致性要求更高的场景,异步数据复制提供的最终一致性无法满足要求。因此 TDengine 提供同步复制的机制供用户选择。在创建数据库时,除指定副本数 replica 之外,用户还需要指定新的参数 strict。如果 strict 等于 1,它表示每次 leader 转发给副本时,需要等待半数以上副本达成一致后,才能通知应用,数据在 follower 已经写入成功。如果在一定的时间内,得不到半数以上副本的确认,leader vnode 将返回错误给应用。
采用同步复制,系统的性能会有所下降,而且 latency 会增加。因为元数据要强一致,mnode 之间的数据同步缺省就是采用的同步复制。
## 缓存与持久化
-### 缓存
+### 时序数据缓存
TDengine 采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Used,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心的是刚产生的数据,即当前状态。TDengine 充分利用这一特性,将最近到达的(当前状态)数据保存在缓存中。
TDengine 通过查询函数向用户提供毫秒级的数据获取能力。直接将最近到达的数据保存在缓存中,可以更加快速地响应用户针对最近一条或一批数据的查询分析,整体上提供更快的数据库查询响应能力。从这个意义上来说,**可通过设置合适的配置参数将 TDengine 作为数据缓存来使用,而不需要再部署 Redis 或其他额外的缓存系统**,可有效地简化系统架构,降低运维的成本。需要注意的是,TDengine 重启以后系统的缓存将被清空,之前缓存的数据均会被批量写入磁盘,缓存的数据将不会像专门的 key-value 缓存系统再将之前缓存的数据重新加载到缓存中。
-每个 vnode 有自己独立的内存,而且由多个固定大小的内存块组成,不同 vnode 之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个 vnode 维护有自己的 skip list,便于迅速查找。当三分之一以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个 vnode 里有三分之一内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个 vnode 的内存块的个数由配置参数 blocks 决定,内存块的大小由配置参数 cache 决定。
+每个 vnode 有自己独立的内存,而且由多个固定大小的内存块组成,不同 vnode 之间完全隔离。数据写入时,类似于日志的写法,数据被顺序追加写入内存,但每个 vnode 维护有自己的 skip list,便于迅速查找。当三分之一以上的内存块写满时,启动落盘操作,而且后续写的操作在新的内存块进行。这样,一个 vnode 里有三分之一内存块是保留有最近的数据的,以达到缓存、快速查找的目的。一个 vnode 的内存大小由配置参数 buffer 决定。
### 持久化存储
diff --git a/docs/zh/21-tdinternal/message.webp b/docs/zh/21-tdinternal/message.webp
index a2a42abff3..4a5f5f86f3 100644
Binary files a/docs/zh/21-tdinternal/message.webp and b/docs/zh/21-tdinternal/message.webp differ
diff --git a/docs/zh/21-tdinternal/structure.webp b/docs/zh/21-tdinternal/structure.webp
index b77a42c074..ee7a799975 100644
Binary files a/docs/zh/21-tdinternal/structure.webp and b/docs/zh/21-tdinternal/structure.webp differ
diff --git a/docs/zh/21-tdinternal/write_follower.webp b/docs/zh/21-tdinternal/write_follower.webp
new file mode 100644
index 0000000000..1ca537cd60
Binary files /dev/null and b/docs/zh/21-tdinternal/write_follower.webp differ
diff --git a/docs/zh/21-tdinternal/write_leader.webp b/docs/zh/21-tdinternal/write_leader.webp
new file mode 100644
index 0000000000..1771961b97
Binary files /dev/null and b/docs/zh/21-tdinternal/write_leader.webp differ
diff --git a/docs/zh/21-tdinternal/write_master.webp b/docs/zh/21-tdinternal/write_master.webp
deleted file mode 100644
index 9624036ed3..0000000000
Binary files a/docs/zh/21-tdinternal/write_master.webp and /dev/null differ
diff --git a/docs/zh/21-tdinternal/write_slave.webp b/docs/zh/21-tdinternal/write_slave.webp
deleted file mode 100644
index 7c45dec11b..0000000000
Binary files a/docs/zh/21-tdinternal/write_slave.webp and /dev/null differ
diff --git a/examples/rust b/examples/rust
deleted file mode 160000
index 7ed7a97715..0000000000
--- a/examples/rust
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 7ed7a97715388fa144718764d6bf20f9bfc29a12
diff --git a/include/client/taos.h b/include/client/taos.h
index 6f3244ea82..b7df0e4d29 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -271,8 +271,6 @@ DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char* tbname);
-
-
DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);
DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); // Returning null means error. Returned result need to be freed by tmq_free_json_meta
DLL_EXPORT void tmq_free_json_meta(char* jsonMeta);
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index c07f422557..57d1199e17 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -89,7 +89,6 @@ extern uint16_t tsTelemPort;
// query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node
-extern bool tsRetrieveBlockingModel; // retrieve threads will be blocked
// query client
extern int32_t tsQueryPolicy;
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 5d6d9178ed..7eafc4c3d8 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -1404,7 +1404,7 @@ typedef struct STableScanAnalyzeInfo {
uint32_t skipBlocks;
uint32_t filterOutBlocks;
double elapsedTime;
- uint64_t filterTime;
+ double filterTime;
} STableScanAnalyzeInfo;
int32_t tSerializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp);
diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h
index a7fae403ed..e15708e357 100644
--- a/include/libs/executor/executor.h
+++ b/include/libs/executor/executor.h
@@ -155,7 +155,7 @@ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t
void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);
-int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes);
+int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList/*,int32_t* resNum, SExplainExecInfo** pRes*/);
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h
index 60ad3ba451..98db7be0d3 100644
--- a/include/libs/function/functionMgt.h
+++ b/include/libs/function/functionMgt.h
@@ -157,6 +157,13 @@ typedef enum EFunctionType {
FUNCTION_TYPE_UDF = 10000
} EFunctionType;
+typedef enum EFuncReturnRows {
+ FUNC_RETURN_ROWS_NORMAL = 1,
+ FUNC_RETURN_ROWS_INDEFINITE,
+ FUNC_RETURN_ROWS_N,
+ FUNC_RETURN_ROWS_N_MINUS_1
+} EFuncReturnRows;
+
struct SqlFunctionCtx;
struct SResultRowEntryInfo;
struct STimeWindow;
@@ -167,6 +174,8 @@ void fmFuncMgtDestroy();
int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen);
+EFuncReturnRows fmGetFuncReturnRows(SFunctionNode* pFunc);
+
bool fmIsBuiltinFunc(const char* pFunc);
bool fmIsAggFunc(int32_t funcId);
@@ -198,6 +207,7 @@ bool fmIsImplicitTsFunc(int32_t funcId);
bool fmIsClientPseudoColumnFunc(int32_t funcId);
bool fmIsMultiRowsFunc(int32_t funcId);
bool fmIsKeepOrderFunc(int32_t funcId);
+bool fmIsCumulativeFunc(int32_t funcId);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 644185a244..2f6bb603c1 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -96,6 +96,7 @@ typedef struct SScanLogicNode {
bool groupSort;
int8_t cacheLastMode;
bool hasNormalCols; // neither tag column nor primary key tag column
+ bool sortPrimaryKey;
} SScanLogicNode;
typedef struct SJoinLogicNode {
@@ -204,6 +205,7 @@ typedef struct SWindowLogicNode {
int8_t igExpired;
EWindowAlgorithm windowAlgo;
EOrder inputTsOrder;
+ EOrder outputTsOrder;
} SWindowLogicNode;
typedef struct SFillLogicNode {
@@ -212,6 +214,7 @@ typedef struct SFillLogicNode {
SNode* pWStartTs;
SNode* pValues; // SNodeListNode
STimeWindow timeRange;
+ EOrder inputTsOrder;
} SFillLogicNode;
typedef struct SSortLogicNode {
@@ -410,6 +413,8 @@ typedef struct SWinodwPhysiNode {
int8_t triggerType;
int64_t watermark;
int8_t igExpired;
+ EOrder inputTsOrder;
+ EOrder outputTsOrder;
} SWinodwPhysiNode;
typedef struct SIntervalPhysiNode {
@@ -434,6 +439,7 @@ typedef struct SFillPhysiNode {
SNode* pValues; // SNodeListNode
SNodeList* pTargets;
STimeWindow timeRange;
+ EOrder inputTsOrder;
} SFillPhysiNode;
typedef struct SMultiTableIntervalPhysiNode {
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index 0600d16d72..5dc1e7512f 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -253,6 +253,7 @@ typedef struct SSelectStmt {
char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision;
int32_t selectFuncNum;
+ int32_t returnRows; // EFuncReturnRows
bool isEmptyResult;
bool isTimeLineResult;
bool isSubquery;
diff --git a/packaging/tools/make_install.bat b/packaging/tools/make_install.bat
index 64f30b8465..0f9e836ae2 100644
--- a/packaging/tools/make_install.bat
+++ b/packaging/tools/make_install.bat
@@ -1,6 +1,7 @@
@echo off
goto %1
:needAdmin
-mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)&goto :eof
+mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)&& echo To start/stop TDengine with administrator privileges: sc start/stop taosd &goto :eof
:hasAdmin
-cp -f C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32
\ No newline at end of file
+cp -f C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32
+sc query "taosd" >nul || sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND
diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt
index 129e20e5de..f52edbe71f 100644
--- a/source/client/CMakeLists.txt
+++ b/source/client/CMakeLists.txt
@@ -20,11 +20,6 @@ target_link_libraries(
)
if(TD_WINDOWS)
- set_target_properties(taos
- PROPERTIES
- LINK_FLAGS
- /DEF:${CMAKE_CURRENT_SOURCE_DIR}/src/taos.def
- )
INCLUDE_DIRECTORIES(jni/windows)
INCLUDE_DIRECTORIES(jni/windows/win32)
INCLUDE_DIRECTORIES(jni/windows/win32/bridge)
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index 5fe9b6bdb1..56d09850fc 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -84,32 +84,11 @@
typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;
typedef enum {
- SCHEMA_ACTION_CREATE_STABLE,
- SCHEMA_ACTION_ADD_COLUMN,
- SCHEMA_ACTION_ADD_TAG,
- SCHEMA_ACTION_CHANGE_COLUMN_SIZE,
- SCHEMA_ACTION_CHANGE_TAG_SIZE,
+ SCHEMA_ACTION_NULL,
+ SCHEMA_ACTION_COLUMN,
+ SCHEMA_ACTION_TAG
} ESchemaAction;
-typedef struct {
- char sTableName[TSDB_TABLE_NAME_LEN];
- SArray *tags;
- SArray *fields;
-} SCreateSTableActionInfo;
-
-typedef struct {
- char sTableName[TSDB_TABLE_NAME_LEN];
- SSmlKv *field;
-} SAlterSTableActionInfo;
-
-typedef struct {
- ESchemaAction action;
- union {
- SCreateSTableActionInfo createSTable;
- SAlterSTableActionInfo alterSTable;
- };
-} SSchemaAction;
-
typedef struct {
const char *measure;
const char *tags;
@@ -226,18 +205,20 @@ static inline bool smlCheckDuplicateKey(const char *key, int32_t keyLen, SHashOb
}
static int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2) {
- memset(pBuf->buf, 0, pBuf->len);
- if (msg1) strncat(pBuf->buf, msg1, pBuf->len);
- int32_t left = pBuf->len - strlen(pBuf->buf);
- if (left > 2 && msg2) {
- strncat(pBuf->buf, ":", left - 1);
- strncat(pBuf->buf, msg2, left - 2);
+ if(pBuf->buf){
+ memset(pBuf->buf, 0, pBuf->len);
+ if (msg1) strncat(pBuf->buf, msg1, pBuf->len);
+ int32_t left = pBuf->len - strlen(pBuf->buf);
+ if (left > 2 && msg2) {
+ strncat(pBuf->buf, ":", left - 1);
+ strncat(pBuf->buf, msg2, left - 2);
+ }
}
return TSDB_CODE_SML_INVALID_DATA;
}
static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSmlKv *kv, bool isTag,
- SSchemaAction *action, bool *actionNeeded, SSmlHandle *info) {
+ ESchemaAction *action, SSmlHandle *info) {
uint16_t *index = (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen);
if (index) {
if (colField[*index].type != kv->type) {
@@ -251,25 +232,17 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
(colField[*index].type == TSDB_DATA_TYPE_NCHAR &&
((colField[*index].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE < kv->length))) {
if (isTag) {
- action->action = SCHEMA_ACTION_CHANGE_TAG_SIZE;
+ *action = SCHEMA_ACTION_TAG;
} else {
- action->action = SCHEMA_ACTION_CHANGE_COLUMN_SIZE;
+ *action = SCHEMA_ACTION_COLUMN;
}
- action->alterSTable.field = kv;
- *actionNeeded = true;
}
} else {
if (isTag) {
- action->action = SCHEMA_ACTION_ADD_TAG;
+ *action = SCHEMA_ACTION_TAG;
} else {
- action->action = SCHEMA_ACTION_ADD_COLUMN;
+ *action = SCHEMA_ACTION_COLUMN;
}
- action->alterSTable.field = kv;
- *actionNeeded = true;
- }
- if (*actionNeeded) {
- uDebug("SML:0x%" PRIx64 " generate schema action. kv->name: %s, action: %d", info->id, kv->key,
- action->action);
}
return 0;
}
@@ -284,171 +257,25 @@ static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) {
} else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE){
result = (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
}
+
+ if (type == TSDB_DATA_TYPE_NCHAR){
+ result = result * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
+ }else if (type == TSDB_DATA_TYPE_BINARY){
+ result = result + VARSTR_HEADER_SIZE;
+ }
return result;
}
-static int32_t smlBuildColumnDescription(SSmlKv *field, char *buf, int32_t bufSize, int32_t *outBytes) {
- uint8_t type = field->type;
- char tname[TSDB_TABLE_NAME_LEN] = {0};
- memcpy(tname, field->key, field->keyLen);
- if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
- int32_t bytes = smlFindNearestPowerOf2(field->length, type);
- int out = snprintf(buf, bufSize, "`%s` %s(%d)", tname, tDataTypes[field->type].name, bytes);
- *outBytes = out;
- } else {
- int out = snprintf(buf, bufSize, "`%s` %s", tname, tDataTypes[type].name);
- *outBytes = out;
- }
-
- return 0;
-}
-
-static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
- int32_t code = 0;
- int32_t outBytes = 0;
- char *result = (char *)taosMemoryCalloc(1, TSDB_MAX_ALLOWED_SQL_LEN);
- int32_t capacity = TSDB_MAX_ALLOWED_SQL_LEN;
-
- uDebug("SML:0x%" PRIx64 " apply schema action. action: %d", info->id, action->action);
- switch (action->action) {
- case SCHEMA_ACTION_ADD_COLUMN: {
- int n = sprintf(result, "alter stable `%s` add column ", action->alterSTable.sTableName);
- smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
- TAOS_RES *res = taos_query((TAOS*)&info->taos->id, result); // TODO async doAsyncQuery
- code = taos_errno(res);
- const char *errStr = taos_errstr(res);
- if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " apply schema action. error: %s", info->id, errStr);
- taosMsleep(100);
- }
- taos_free_result(res);
-
- break;
- }
- case SCHEMA_ACTION_ADD_TAG: {
- int n = sprintf(result, "alter stable `%s` add tag ", action->alterSTable.sTableName);
- smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
- TAOS_RES *res = taos_query((TAOS*)&info->taos->id, result); // TODO async doAsyncQuery
- code = taos_errno(res);
- const char *errStr = taos_errstr(res);
- if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " apply schema action. error : %s", info->id, taos_errstr(res));
- taosMsleep(100);
- }
- taos_free_result(res);
-
- break;
- }
- case SCHEMA_ACTION_CHANGE_COLUMN_SIZE: {
- int n = sprintf(result, "alter stable `%s` modify column ", action->alterSTable.sTableName);
- smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
- TAOS_RES *res = taos_query((TAOS*)&info->taos->id, result); // TODO async doAsyncQuery
- code = taos_errno(res);
- if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " apply schema action. error : %s", info->id, taos_errstr(res));
- taosMsleep(100);
- }
- taos_free_result(res);
-
- break;
- }
- case SCHEMA_ACTION_CHANGE_TAG_SIZE: {
- int n = sprintf(result, "alter stable `%s` modify tag ", action->alterSTable.sTableName);
- smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
- TAOS_RES *res = taos_query((TAOS*)&info->taos->id, result); // TODO async doAsyncQuery
- code = taos_errno(res);
- if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " apply schema action. error : %s", info->id, taos_errstr(res));
- taosMsleep(100);
- }
- taos_free_result(res);
-
- break;
- }
- case SCHEMA_ACTION_CREATE_STABLE: {
- int n = sprintf(result, "create stable `%s` (", action->createSTable.sTableName);
- char *pos = result + n;
- int freeBytes = capacity - n;
-
- SArray *cols = action->createSTable.fields;
-
- for (int i = 0; i < taosArrayGetSize(cols); i++) {
- SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, i);
- smlBuildColumnDescription(kv, pos, freeBytes, &outBytes);
- pos += outBytes;
- freeBytes -= outBytes;
- *pos = ',';
- ++pos;
- --freeBytes;
- }
-
- --pos;
- ++freeBytes;
-
- outBytes = snprintf(pos, freeBytes, ") tags (");
- pos += outBytes;
- freeBytes -= outBytes;
-
- cols = action->createSTable.tags;
- for (int i = 0; i < taosArrayGetSize(cols); i++) {
- SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, i);
- smlBuildColumnDescription(kv, pos, freeBytes, &outBytes);
- pos += outBytes;
- freeBytes -= outBytes;
- *pos = ',';
- ++pos;
- --freeBytes;
- }
- if (taosArrayGetSize(cols) == 0) {
- outBytes = snprintf(pos, freeBytes, "`%s` %s(%d)", tsSmlTagName, tDataTypes[TSDB_DATA_TYPE_NCHAR].name, 1);
- pos += outBytes;
- freeBytes -= outBytes;
- *pos = ',';
- ++pos;
- --freeBytes;
- }
- pos--;
- ++freeBytes;
- outBytes = snprintf(pos, freeBytes, ")");
- TAOS_RES *res = taos_query((TAOS*)&info->taos->id, result);
- code = taos_errno(res);
- if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " apply schema action. error : %s", info->id, taos_errstr(res));
- taosMsleep(100);
- }
- taos_free_result(res);
-
- break;
- }
-
- default:
- break;
- }
-
- taosMemoryFreeClear(result);
- if (code != 0) {
- uError("SML:0x%" PRIx64 " apply schema action failure. %s", info->id, tstrerror(code));
- }
- return code;
-}
-
static int32_t smlProcessSchemaAction(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols,
- SSchemaAction *action, bool isTag) {
+ ESchemaAction *action, bool isTag) {
int32_t code = TSDB_CODE_SUCCESS;
for (int j = 0; j < taosArrayGetSize(cols); ++j) {
if(j == 0 && !isTag) continue;
SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, j);
- bool actionNeeded = false;
- code = smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, action, &actionNeeded, info);
+ code = smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, action, info);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- if (actionNeeded) {
- code = smlApplySchemaAction(info, action);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
- }
}
return TSDB_CODE_SUCCESS;
}
@@ -475,6 +302,144 @@ static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols, bool
return 0;
}
+static int32_t getBytes(uint8_t type, int32_t length){
+ if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
+ return smlFindNearestPowerOf2(length, type);
+ } else {
+ return tDataTypes[type].bytes;
+ }
+}
+
+//static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData,
+// int32_t colVer, int32_t tagVer, int8_t source, uint64_t suid){
+static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData,
+ STableMeta *pTableMeta, ESchemaAction action){
+
+ SRequestObj* pRequest = NULL;
+ SMCreateStbReq pReq = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SCmdMsgInfo pCmdMsg = {0};
+
+ code = buildRequest(info->taos->id, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+
+ if (action == SCHEMA_ACTION_NULL){
+ pReq.colVer = 1;
+ pReq.tagVer = 1;
+ pReq.suid = 0;
+ pReq.source = TD_REQ_FROM_APP;
+ } else if (action == SCHEMA_ACTION_TAG){
+ pReq.colVer = pTableMeta->sversion;
+ pReq.tagVer = pTableMeta->tversion + 1;
+ pReq.suid = pTableMeta->uid;
+ pReq.source = TD_REQ_FROM_TAOX;
+ } else if (action == SCHEMA_ACTION_COLUMN){
+ pReq.colVer = pTableMeta->sversion + 1;
+ pReq.tagVer = pTableMeta->tversion;
+ pReq.suid = pTableMeta->uid;
+ pReq.source = TD_REQ_FROM_TAOX;
+ }
+
+ pReq.commentLen = -1;
+ pReq.igExists = true;
+ tNameExtractFullName(pName, pReq.name);
+
+ if(action == SCHEMA_ACTION_NULL || action == SCHEMA_ACTION_COLUMN){
+ pReq.numOfColumns = taosArrayGetSize(sTableData->cols);
+ pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SField));
+ for (int i = 0; i < pReq.numOfColumns; i++) {
+ SSmlKv *kv = (SSmlKv *)taosArrayGetP(sTableData->cols, i);
+ SField field = {0};
+ field.type = kv->type;
+ field.bytes = getBytes(kv->type, kv->length);
+ memcpy(field.name, kv->key, kv->keyLen);
+ taosArrayPush(pReq.pColumns, &field);
+ }
+ }else if (action == SCHEMA_ACTION_TAG){
+ pReq.numOfColumns = pTableMeta->tableInfo.numOfColumns;
+ pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SField));
+ for (int i = 0; i < pReq.numOfColumns; i++) {
+ SSchema *s = &pTableMeta->schema[i];
+ SField field = {0};
+ field.type = s->type;
+ field.bytes = s->bytes;
+ strcpy(field.name, s->name);
+ taosArrayPush(pReq.pColumns, &field);
+ }
+ }
+
+ if(action == SCHEMA_ACTION_NULL || action == SCHEMA_ACTION_TAG){
+ pReq.numOfTags = taosArrayGetSize(sTableData->tags);
+ if (pReq.numOfTags == 0){
+ pReq.numOfTags = 1;
+ pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
+ SField field = {0};
+ field.type = TSDB_DATA_TYPE_NCHAR;
+ field.bytes = 1;
+ strcpy(field.name, tsSmlTagName);
+ taosArrayPush(pReq.pTags, &field);
+ }else{
+ pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
+ for (int i = 0; i < pReq.numOfTags; i++) {
+ SSmlKv *kv = (SSmlKv *)taosArrayGetP(sTableData->tags, i);
+ SField field = {0};
+ field.type = kv->type;
+ field.bytes = getBytes(kv->type, kv->length);
+ memcpy(field.name, kv->key, kv->keyLen);
+ taosArrayPush(pReq.pTags, &field);
+ }
+ }
+ }else if (action == SCHEMA_ACTION_COLUMN){
+ pReq.numOfTags = pTableMeta->tableInfo.numOfTags;
+ pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
+ for (int i = 0; i < pReq.numOfTags; i++) {
+ SSchema *s = &pTableMeta->schema[i + pTableMeta->tableInfo.numOfColumns];
+ SField field = {0};
+ field.type = s->type;
+ field.bytes = s->bytes;
+ strcpy(field.name, s->name);
+ taosArrayPush(pReq.pTags, &field);
+ }
+ }
+
+ pCmdMsg.epSet = getEpSet_s(&info->taos->pAppInfo->mgmtEp);
+ pCmdMsg.msgType = TDMT_MND_CREATE_STB;
+ pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq);
+ pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
+ if (NULL == pCmdMsg.pMsg) {
+ tFreeSMCreateStbReq(&pReq);
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
+
+ SQuery pQuery;
+ pQuery.execMode = QUERY_EXEC_MODE_RPC;
+ pQuery.pCmdMsg = &pCmdMsg;
+ pQuery.msgType = pQuery.pCmdMsg->msgType;
+ pQuery.stableQuery = true;
+
+ launchQueryImpl(pRequest, &pQuery, true, NULL);
+
+ if(pRequest->code == TSDB_CODE_SUCCESS){
+ catalogRemoveTableMeta(info->pCatalog, pName);
+ }
+ code = pRequest->code;
+ taosMemoryFree(pCmdMsg.pMsg);
+
+end:
+ destroyRequest(pRequest);
+ tFreeSMCreateStbReq(&pReq);
+ return code;
+}
+
static int32_t smlModifyDBSchemas(SSmlHandle *info) {
int32_t code = 0;
SName pName = {TSDB_TABLE_NAME_T, info->taos->acctId, {0}, {0}};
@@ -500,16 +465,9 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_MND_STB_NOT_EXIST) {
- SSchemaAction schemaAction;
- schemaAction.action = SCHEMA_ACTION_CREATE_STABLE;
- memset(&schemaAction.createSTable, 0, sizeof(SCreateSTableActionInfo));
- memcpy(schemaAction.createSTable.sTableName, superTable, superTableLen);
- schemaAction.createSTable.tags = sTableData->tags;
- schemaAction.createSTable.fields = sTableData->cols;
- code = smlApplySchemaAction(info, &schemaAction);
+ code = smlSendMetaMsg(info, &pName, sTableData, NULL, SCHEMA_ACTION_NULL);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " smlApplySchemaAction failed. can not create %s", info->id,
- schemaAction.createSTable.sTableName);
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
goto end;
}
info->cost.numOfCreateSTables++;
@@ -521,24 +479,42 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES);
}
- SSchemaAction schemaAction;
- memset(&schemaAction, 0, sizeof(SSchemaAction));
- memcpy(schemaAction.createSTable.sTableName, superTable, superTableLen);
- code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->tags, &schemaAction, true);
+ ESchemaAction action = SCHEMA_ACTION_NULL;
+ code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->tags, &action, true);
if (code != TSDB_CODE_SUCCESS) {
taosHashCleanup(hashTmp);
goto end;
}
+ if (action == SCHEMA_ACTION_TAG){
+ code = smlSendMetaMsg(info, &pName, sTableData, pTableMeta, action);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ goto end;
+ }
+ }
+
+ code = catalogRefreshTableMeta(info->pCatalog, &conn, &pName, -1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
taosHashClear(hashTmp);
for (uint16_t i = 1; i < pTableMeta->tableInfo.numOfColumns; i++) {
taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES);
}
- code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->cols, &schemaAction, false);
+ action = SCHEMA_ACTION_NULL;
+ code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->cols, &action, false);
taosHashCleanup(hashTmp);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
+ if (action == SCHEMA_ACTION_COLUMN){
+ code = smlSendMetaMsg(info, &pName, sTableData, pTableMeta, action);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ goto end;
+ }
+ }
code = catalogRefreshTableMeta(info->pCatalog, &conn, &pName, -1);
if (code != TSDB_CODE_SUCCESS) {
@@ -1504,11 +1480,13 @@ static SSmlHandle* smlBuildSmlInfo(STscObj* pTscObj, SRequestObj* request, SMLPr
}
((SVnodeModifOpStmt *)(info->pQuery->pRoot))->payloadType = PAYLOAD_TYPE_KV;
- info->taos = pTscObj;
- code = catalogGetHandle(info->taos->pAppInfo->clusterId, &info->pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " get catalog error %d", info->id, code);
- goto cleanup;
+ if (pTscObj){
+ info->taos = pTscObj;
+ code = catalogGetHandle(info->taos->pAppInfo->clusterId, &info->pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("SML:0x%" PRIx64 " get catalog error %d", info->id, code);
+ goto cleanup;
+ }
}
info->precision = precision;
@@ -1518,9 +1496,12 @@ static SSmlHandle* smlBuildSmlInfo(STscObj* pTscObj, SRequestObj* request, SMLPr
} else {
info->dataFormat = true;
}
- info->pRequest = request;
- info->msgBuf.buf = info->pRequest->msgBuf;
- info->msgBuf.len = ERROR_MSG_BUF_DEFAULT_SIZE;
+
+ if(request){
+ info->pRequest = request;
+ info->msgBuf.buf = info->pRequest->msgBuf;
+ info->msgBuf.len = ERROR_MSG_BUF_DEFAULT_SIZE;
+ }
info->exec = smlInitHandle(info->pQuery);
info->childTables = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
diff --git a/source/client/src/taos.def b/source/client/src/taos.def
deleted file mode 100644
index 994dd75090..0000000000
--- a/source/client/src/taos.def
+++ /dev/null
@@ -1,81 +0,0 @@
-taos_cleanup
-taos_options
-taos_set_config
-taos_init
-taos_connect
-taos_connect_l
-taos_connect_auth
-taos_close
-taos_data_type
-taos_stmt_init
-taos_stmt_prepare
-taos_stmt_set_tbname_tags
-taos_stmt_set_tbname
-taos_stmt_set_sub_tbname
-taos_stmt_is_insert
-taos_stmt_num_params
-taos_stmt_get_param
-taos_stmt_bind_param
-taos_stmt_bind_param_batch
-taos_stmt_bind_single_param_batch
-taos_stmt_add_batch
-taos_stmt_execute
-taos_stmt_use_result
-taos_stmt_close
-taos_stmt_errstr
-taos_stmt_affected_rows
-taos_stmt_affected_rows_once
-taos_query
-taos_query_l
-taos_fetch_row
-taos_result_precision
-taos_free_result
-taos_field_count
-taos_num_fields
-taos_affected_rows
-taos_fetch_fields
-taos_select_db
-taos_print_row
-taos_stop_query
-taos_is_null
-taos_is_update_query
-taos_fetch_block
-taos_fetch_block_s
-taos_fetch_raw_block
-taos_get_column_data_offset
-taos_validate_sql
-taos_reset_current_db
-taos_fetch_lengths
-taos_result_block
-taos_get_server_info
-taos_get_client_info
-taos_errstr
-taos_errno
-taos_query_a
-taos_fetch_rows_a
-taos_subscribe
-taos_consume
-taos_unsubscribe
-taos_load_table_info
-taos_schemaless_insert
-tmq_list_new
-tmq_list_append
-tmq_list_destroy
-tmq_list_get_size
-tmq_list_to_c_array
-tmq_consumer_new
-tmq_err2str
-tmq_subscribe
-tmq_unsubscribe
-tmq_subscription
-tmq_consumer_poll
-tmq_consumer_close
-tmq_commit
-tmq_conf_new
-tmq_conf_set
-tmq_conf_destroy
-tmq_conf_set_offset_commit_cb
-tmq_get_topic_name
-tmq_get_vgroup_id
-tmq_create_stream
-taos_check_server_status
\ No newline at end of file
diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp
index e7ae3917f9..08b0f3abb2 100644
--- a/source/client/test/clientTests.cpp
+++ b/source/client/test/clientTests.cpp
@@ -123,7 +123,7 @@ void createNewTable(TAOS* pConn, int32_t index) {
}
taos_free_result(pRes);
- for(int32_t i = 0; i < 1000; i += 20) {
+ for(int32_t i = 0; i < 100000; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
@@ -154,7 +154,7 @@ TEST(testCase, driverInit_Test) {
}
TEST(testCase, connect_Test) {
-// taos_options(TSDB_OPTION_CONFIGDIR, "/home/ubuntu/first/cfg");
+ taos_options(TSDB_OPTION_CONFIGDIR, "/home/lisa/Documents/workspace/tdengine/sim/dnode1/cfg");
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
@@ -501,7 +501,6 @@ TEST(testCase, show_vgroup_Test) {
taos_close(pConn);
}
-
TEST(testCase, create_multiple_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@@ -665,6 +664,7 @@ TEST(testCase, insert_test) {
taos_free_result(pRes);
taos_close(pConn);
}
+#endif
TEST(testCase, projection_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
@@ -697,7 +697,7 @@ TEST(testCase, projection_query_tables) {
}
taos_free_result(pRes);
- for(int32_t i = 0; i < 100; ++i) {
+ for(int32_t i = 0; i < 1; ++i) {
printf("create table :%d\n", i);
createNewTable(pConn, i);
}
@@ -723,6 +723,7 @@ TEST(testCase, projection_query_tables) {
taos_close(pConn);
}
+#if 0
TEST(testCase, projection_query_stables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@@ -820,21 +821,8 @@ TEST(testCase, async_api_test) {
getchar();
taos_close(pConn);
}
-#endif
-
TEST(testCase, update_test) {
-
- SInterval interval = {0};
- interval.offset = 8000;
- interval.interval = 10000;
- interval.sliding = 4000;
- interval.intervalUnit = 's';
- interval.offsetUnit = 's';
- interval.slidingUnit = 's';
-// STimeWindow w = getAlignQueryTimeWindow(&interval, 0, 1630000000000);
- STimeWindow w = getAlignQueryTimeWindow(&interval, 0, 1629999999999);
-
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
@@ -869,4 +857,8 @@ TEST(testCase, update_test) {
taos_free_result(pRes);
}
}
+
+#endif
+
+
#pragma GCC diagnostic pop
diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp
index d74be742a2..68a8b9d336 100644
--- a/source/client/test/smlTest.cpp
+++ b/source/client/test/smlTest.cpp
@@ -511,447 +511,10 @@ TEST(testCase, smlParseNumber_Test) {
printf("res:%d,v:%f, %f\n", res,kv.d, HUGE_VAL);
}
-//#include
-//TEST(testCase, number_Test) {
-// char *str[] = {
-//// "-000 0999",
-// "- abc",
-// };
-// for(int i = 0; i < sizeof(str)/sizeof(str[0]); i++){
-// errno = 0;
-// char *end = NULL;
-// long result = strtol(str[i], &end, 10);
-// printf("errno:%d,len:%d,result:%ld\n", errno, end - str[i], result);
-// }
-//
-//}
-/*
-TEST(testCase, smlProcess_influx_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists inflx_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use inflx_db");
- taos_free_result(pRes);
-
- const char *sql[] = {
- "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0 1451606401000000000",
- "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451607402000000000",
- "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,heading=221,grade=0,fuel_consumption=25 1451608403000000000",
- "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451609404000000000",
- "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451619405000000000",
- "readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation=255,velocity=0,heading=181,grade=0,fuel_consumption=25 1451606406000000000",
- "readings,name=truck_2,driver=Derek,model=F-150,device_version=v1.5 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451606407000000000",
- "readings,name=truck_2,fleet=North,driver=Derek,model=F-150 load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation=428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451609408000000000",
- "readings,fleet=South,name=truck_0,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 1451629409000000000",
- "stable,t1=t1,t2=t2,t3=t3 c1=1,c2=2,c3=\"kk\",c4=4 1451629501000000000",
- "stable,t2=t2,t1=t1,t3=t3 c1=1,c3=\"\",c4=4 1451629602000000000",
- };
- pRes = taos_schemaless_insert(taos, (char**)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, 0);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-
- // case 1
- pRes = taos_query(taos, "select * from t_91e0b182be80332b5c530cbf872f760e");
- ASSERT_NE(pRes, nullptr);
- int fieldNum = taos_field_count(pRes);
- ASSERT_EQ(fieldNum, 11);
- printf("fieldNum:%d\n", fieldNum);
-
- TAOS_ROW row = NULL;
- int32_t rowIndex = 0;
- while((row = taos_fetch_row(pRes)) != NULL) {
- int64_t ts = *(int64_t*)row[0];
- double load_capacity = *(double*)row[1];
- double fuel_capacity = *(double*)row[2];
- double nominal_fuel_consumption = *(double*)row[3];
- double latitude = *(double*)row[4];
- double longitude = *(double*)row[5];
- double elevation = *(double*)row[6];
- double velocity = *(double*)row[7];
- double heading = *(double*)row[8];
- double grade = *(double*)row[9];
- double fuel_consumption = *(double*)row[10];
- if(rowIndex == 0){
- ASSERT_EQ(ts, 1451606407000);
- ASSERT_EQ(load_capacity, 2000);
- ASSERT_EQ(fuel_capacity, 200);
- ASSERT_EQ(nominal_fuel_consumption, 15);
- ASSERT_EQ(latitude, 24.5208);
- ASSERT_EQ(longitude, 28.09377);
- ASSERT_EQ(elevation, 428);
- ASSERT_EQ(velocity, 0);
- ASSERT_EQ(heading, 304);
- ASSERT_EQ(grade, 0);
- ASSERT_EQ(fuel_consumption, 25);
- }else{
- ASSERT_FALSE(1);
- }
- rowIndex++;
- }
- taos_free_result(pRes);
-
- // case 2
- pRes = taos_query(taos, "select * from t_6885c584b98481584ee13dac399e173d");
- ASSERT_NE(pRes, nullptr);
- fieldNum = taos_field_count(pRes);
- ASSERT_EQ(fieldNum, 5);
- printf("fieldNum:%d\n", fieldNum);
-
- rowIndex = 0;
- while((row = taos_fetch_row(pRes)) != NULL) {
- int *length = taos_fetch_lengths(pRes);
-
- int64_t ts = *(int64_t*)row[0];
- double c1 = *(double*)row[1];
- double c4 = *(double*)row[4];
- if(rowIndex == 0){
- ASSERT_EQ(ts, 1451629501000);
- ASSERT_EQ(c1, 1);
- ASSERT_EQ(*(double*)row[2], 2);
- ASSERT_EQ(length[3], 2);
- ASSERT_EQ(memcmp(row[3], "kk", length[3]), 0);
- ASSERT_EQ(c4, 4);
- }else if(rowIndex == 1){
- ASSERT_EQ(ts, 1451629602000);
- ASSERT_EQ(c1, 1);
- ASSERT_EQ(row[2], nullptr);
- ASSERT_EQ(length[3], 0);
- ASSERT_EQ(c4, 4);
- }else{
- ASSERT_FALSE(1);
- }
- rowIndex++;
- }
- taos_free_result(pRes);
-
- // case 2
- pRes = taos_query(taos, "show tables");
- ASSERT_NE(pRes, nullptr);
-
- row = taos_fetch_row(pRes);
- int rowNum = taos_affected_rows(pRes);
- ASSERT_EQ(rowNum, 5);
- taos_free_result(pRes);
-}
-
-// different types
-TEST(testCase, smlParseLine_error_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use sml_db");
- taos_free_result(pRes);
-
- const char *sql[] = {
- "krtqjjkzfg,t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1626056811823316532",
- "krtqjjkzfg,t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false 1626056811843316532",
- "krtqjjkzfg,t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false,c5=32i8,c6=64i16,c7=32i32,c8=88.88f32 1626056812843316532"
-// "ecaomjycxl,t0=t,t1=gptkzldguxgnfvlodfomaabofbsdfvwkzwokpyxqhfylzrzmddjllvatokfuzuzevmoxzmrdsgfyxzxcbxlkxtuctbmwgxjekojjbdtfbzabtccidinfsyrrbxjsliujnkqenagebvnkljjrqotmkjkprostwmtmufmihqbzrwpdomwzlyfsyhaydcofohwichlbesgujmlruftlikqsfbjyctopdxnesqfdklwvrkvolxxhinfaczkzynffphsccerjmiwteqllnhxprlpxxgwxrlgprakmvzdedptcxmeicgsewaefufdyicewiabmmduuwygggokwllirsuhstrvgruywfvftqstugcihsepltbtoqgsrvqbuzzjahbitssymdtieksqdjkafztekeybbqdhuyxqeniozgixutoikuugboapfhvknyipgmpnkhaqbccyycjfqohevpgsndcxppbtwemjwrvavvuxaontrknufynenrpnpyuhaozoeuizmvycknmmujmveaskgtybdkemrusekuaofntgzqijcrnnvnrdkbzigtoeiuihoohebaesfafqlszqoccbjakkbqdqohkvpzbzpjivkyqgiprreljvzaavymuojowacoexrbtrkdsmpnjpdapqtbilmihxoytvrphjckehpioilevatiulqgrnvavpzaknocgplwvocvonkjqaxmnzkghfxresuicfxpvurndqvvyuafmmcoaysogdhupdgrxupwzugslzehwtwapyditfhlwgvwypypfyiaouobpdherkdybrhatsejbxxozbencmwxxbveaoyvlwainfjttstygghihqyjpoerxxkdtrzhcephntuueludtywxirjntqvtafxhqkicpogphysnrtsfkqodahxevshxumecnxtenwmgcoalgvfzghmzsnysombtlkowgfuzelvihtzgxmoktqhltuxxyxucleydssoywkvribqkwwziqgllszvfwubtyuwwhyvicbhjiybkrryjvcqwnbwjkyatpaqntkevbrizjuzjnbwplqlpnpdkpewvgsuhhndudznazireluqkebawasxwdpewchxsegrgigxbixsarblhspuvkwcnyjwxygubrmrybvamjjoynozjsradzxnovldcfqesdzrdthgecporhfelhorgqoldssyuqmunrqhhrogjbcbzssrgnasxxixvusykowycwmcbhruxlflejsewksutysezeahfxfvifuujmtvuiddhetsykbrngppqhujuzdnvogltkwdwwvdhahdbtobpjwuqgnunvyenvmqdslkwuanvslyzodvkcfdvhgmixzzqqrukdslxugfqinqhmddwztygynowpkmlitnlcecoyjxtgwpggjrtphznarzwqlojninlqwcwizdmzwyimkirbrgxgroxbrajxbkwzjlhrccwmshfmddmxvewmwtedfwkjpbrrfcxkypigifjwgdiwesbyhbhnumcswcojnqlnzebhlpgsxufqycqadticqgkgbxkhrisyhkwjdfotladolmcspmqxpgreqctibcclbheaaudoigoevqrksohvuoskrufqdnzharmwkfxepzhvnkuywwhpzzmlksnfrjcbntwxzpgdsqonottkaevidbipxpssnlsqprupcvipcdumpeyrezvlzdxzwqpengyiugqbusgobgsxxxbcsobudpoliqndvepamaygrgueglxvxfsowflkzhmtgsninkgiecobbrzidsgtexvlxltipoohoaoxkslooojyyueeczrcaolsejlanqtyeetvtjlscihyibuujclpgbfzgznjxxqbcjymmtgzjiklyywhamjfdpycfaqtywuzhnvkkkpsarqxjszihjnmeorubperzbqdkzxmkjwfmnyfhgqzsi
ntrfdolzxudqnwgkoowirkxmmrtbshgdydbsumeanvtewwfpaytqaaqfwbugwtvawqoxxtbitkgdjiwuuuclitrsaxlyyleqomzzhjdmuxzbdsdqdobnhmqoreewdbpmrvmnzsibrzizsocaziuoxgpxkqlcrxooaiduferfakupcxilxrvgscpdibyyzgvibjtukjdbdwfuebfgylswvvoouywbucdsxgvooaubjhhxnmjmjysvwxpkwemkisvfvpfesgvoksoyaafjrnzvjzscbqgmprmmrbnjtyphrwacmgbhfkpgxiyytvdtjgfurxziauixoymzchfrdynhizwjqqgepswgjimoaunqnqakyksbkkfeejdkemkhvjhnlrwoqzvipjhdreeqanuhqjdfjukhlqgvjczxwgsmfwlwsfnwxxbqwjqnatvffnyqyhbxgknkhlijccnjgxbmkdxixkvhaikxsnoacvsbwuluwfiagacqlfgqdervhzqvqxawsjovvlxvinvuvjqfbisgfcjbgkhrfeknnkqmyqxsqtlgejmdxgrygulpvrdnhoxwxdyszphcxacffedtgzphinzesdzlpxezstravtrytylbwstfzvlnayvvokzegijaclwodhddctlbslpfnnbnububsrwtexlvxfhgkluqzqykupxossvlkheptsoxcgmnocaryvinunlasadffshmrdegjmuglwnzqwvvjwpuwasewzpndmoumqrzjsorblotxjqcuwspdclcnfayyhimzuznfogkrvpcgbcmqsplnbvqebjdzyslqkzpeuqowaokbzjutiqvuxoghpjltfabfmqnnhhggcurgumdqckbowckwskrsogrnkxirlofwcoxqvbcgzpbyyvnpmdetblwxwkhjrfbwqtshaeihnwjaqpvxlmyzbxijfokizeczjdnxwxbsagycumauiuuxcwkxxexpufdmuuggafmtioxbklnfojjbdepdyjqonwwakznwfjusrhfpcufrgtwmvpnpzaymzaamzhzmezjqajzvrojqbkeqncmupdyfdhhpmvlpgviwaslqhkvsamooaekqyphvvmsnvbyrjczojeuxknjtaknktjdopcbmpsyndmvjmgaygrmpgejwelfxquecvoslzgocsvtyklwkaezzghsnpiogfsitwfknfigfbmgjmhzniebmqtaabzaoyxljukylniyagmsmpcxzcmbrxamwlzgbbdpzvicskvywzgidddfjitbereuzqhrbvhogcnalvhlaxdrjblxmdlkrqtppkxgpehmwrinbrurkrizybltkpojwhpnyjsbovbnqbboqgouefbmffobnvhfpgishijqghrixfkrgejmyxeuasepwoyuoorqwbkcxgvfitspizgxifmieyunghxbrsemepnjywuepkwovhimefasnygqdzadbvnuutipmwfnxqvlbztxelyootchpicwlzccxuqxdwfwbenfzdaopqajtureuurquxenlujmetrvxqbsbuswgngrwaexawkgdjlcxviguvmboepwhwvocklvkdpzvdpvkresfvmdqcikpnagssviaaqrwcpwxfwbrdnkvkrbgaicomqspynedeehfbfkxxkkbztvocusvxiyptvflnjvozjdwgituicqkoyierbhpjiitpcrwouoilsqromkoxjsyxytudxcinradsikwiytegqcxsgreuhsdggnjzdtbfcyojyzxtfnzobgejkwtlzqyjedwknrdjoicgtupmbpvcabwvjvqyreyzykrayhgqadtldjbjvrqnvyqpyfwagruxguwicydhcjascvexdqwqcdzydfhryusqdulkretvvjqpdbsawevvkmopfmpznkfbrzaggvrxwsfaeqossiyeipqevryhnuxdaflytknzzttixjovduqvgaduztsjcnefem
anvcbjfjppmvfmqvwzjgzbgsliwchsxafnqhqqgehjpzrhactpebmysyuionrdyrjusiekjoexuubgyfntdpxjzfrdwhdckbezsgrapsxmaswjusjoruem c0=f 1626006833639000000"
-// "st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
-// "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000",
-// "test_stb,t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1626056811823316532"
-// "mqjqbesqqq,t0=t c0=f,c1=\"grigbrpnjctnyhamnwfqsjrywdforgpwpabdisdnymlboguwxuoscfyyajiyusxrocjndexxcvcqrzgxceqolvtdrpeabrcokpmcnhduylzxxljospclwuebutrdbpklpdbtkrdppamenzlmkkzttacrfxaozvwodpxzralhmdhvgaurtacnyhlsaojjglfnrylswactjumeldmuuafnmwsuuyiwhpdzqludgpluvllfowkwhfbtgsjsnxdbfbcrqnrxllmokbzrkiuxkhumfcjogeugbbjowmckoeyrilsoenowqwjpuufprqnxxzjlwfxnoljtodghyfdtyyptxafertndevhewboikewxwtwvbusnjpxwpnhhcrqqyicuuxmadxqjhbodsbexgpuaicbxduewqnogdhyjhcyjyftfbvbctgbjrwrkqtmqhzwxnilkmorotbiuwsimuvloeykzxqdepdkvvdcjtzmsvdseygtprbvhuikvomoafwnfojzaojxbkbpwbjqasazgokjjpktofqhjqhxxplkdvttwflbekawvozxiuhoahajwpimnjsbzjfhqgbbcgjgrjszmuqmwupxqlosfdsqnpkertnamcipfanxxewygtkeiaqopvykygkfbihdqqvhwapyctmxbjvzdndobrooemwtotrjzuknaupwxrjbrjzmnmupbwcdwkoghsilyfrjeefwrmgordzlafyjweavrapqqsicqnmkjulambrjxcmmsnvcjbbbwrloifqnmcmqlndubcynhumpikddliddyduafrpfgcltiymwlpbhtukmyawxdaaqiscvpfvsacjdljlfaeqhearjyczdpjsyjygfwaegqtylpibtdqinncmttbtiifbsesqbhpieectocontqhoyggjgbgjiyegoypfxorfqgbewhfqhkqftjdwtcnaiconxwjwryxqyexmlauoiysodziwfyyzyfewnfjyvfnvvxkkxeajmwbypoodsfrfygadcwpcjhzvemaplczgqxpsxkgxuqbxqhchpybojemqgxlhcyxmddjvwnbkvykkhwebfdpoovtvzgpkuwneodbochwxwauggxulmkynhoohchnkkcybhtwelotxpzoqzuczhwbjxsuqckuzsapdfkeiwxkcutimncyfpuaovhzwaebebkxgbognzpcxldjptnnldzqwtzzsiyjambnbrbyuyptxdkyxhlvigovllmkylbyzecxhdxczlkvsconlfvnafqvrwhcughqbtlmwgumeponoonhqqbklqaxvslkxowcuztjikgbutrnkmizschzrjxbmzvkkdlcuchpnrbxhgvxjqxawftlbirksyqbnltbaxtpyivrrqgxcjjgbhhvlltfqogehhddmgzivwlznkfuqgfbxrtixuonnywsxsdunhsmziyitecmrmxjkzmqhivinqkqywggffljpoxnofmzxkrmnxbdkokaleaqwbqizzhvywrweklybntszygkvuvypmukyxawhgbtnltemjchaytpqjplavcbypkwjwdmfogdxiddrtwrvvoqlmhsqlrdmmibawotbouzosrmbzocqvqdhoamuzvrfeyxkrnzpzyuacffbuvlcqupmbmqughegvjrobyzcyhynnpvvjntdcyplahaajwcbbleblkhjyauehbuoudyzrsgrtqnufijaawllbiexvhveipkaxffuiyczbkpcpzdnajwkkbbfrfchpedgabsraaalfbddgypeayprqwjzfvifjmgwaexrezitgaqgjmgizaohcfizhocckuzysshxzwqolddumeomghgsoaaerfxsapupejhywucurrhgctmmlgbyfjigveayriyvdmapvafzeydlxiwxcgnnajwjaqzkecczrdlbxgtdvehelzibmogdijp
diatcafnediqldwszonasgodqnnvajqxuwuftuvcqwtvayeiyysihhckitlimuimjllslrcnbobmumpbtoakqxallkqhszloxzpogfxxclnkmbfnqqomtzfpzdgxnfyvppeybjnekchjsafvhkrpvxpiumfvraeqcwqneatdrxdykoyscehvputknetluvfexdvtnnnaitrbdwyrzwnymuimydmadqbncdfixsgrcxkdnjzzyimcfbomioddimdxzxbmqwgnrezaquhiqytxmgsqpmywzxqksahlgwpxprtuzxtghkdbgxwpmdqxastvqggkhaqkxhjdsfcwyljwlleymorfgkezzroxaalrguityckdxsgqkcpaxxcsqvttvmmmmxezdztmkgcpnxbobuggzuqaetqfnmttjbshhfqqfsmfylavatksdchwvfvhyipfsepkwzqtprzogoxxohvibkwwiuwpsybbqqgbvziecnulnudzpudxvtcosvedrdxkhnkprghljzltucqljhdwpsfrsfryxpzybmybockeswpyihgossicvoxroiuzvgkbtduxzgsmgrohrxjbdvpnqwhgtvfkjrgpmirfdqddoyaztlooxlzllljsniwxfbihodjfywxallozikruusmzigztbzlyofrxtghhjwgptdbntmqkoxmrgzaznesgsgjnbmgarjqsqvswzygkbgquhbxsulabxzfpfnopzfnbiqeivuzayjbaikqrxrhyysttcafyxfdzgbbadcxiqltlwyhbcibkcxildnhmgwskeztcnmzdncqlyzpbzifjrhflsahlecmxwxlzmvpkqdexbfflmjhdqymxmjrktxaratynebetkfaltnrjgvsbdbvdcdyqujuypensmjnjskovbeweuwnqfjueeylefnqvmdmkwjqfvbjcuaibosymddysdymzhscroykljydnfvwosgplfphpznaqsddtbcjmyxhmcnxwdesycovtwrlmixqmpnzsbyfwpgnujxxqillwpbasdnbfxzokimfkujvlabycfwlplzpcgangjagrrhhjbrtddgitpoemixmobwyabyhsnkjtbeasdejawrueegmijbupygyciwbrhiwisguhlthnkpjqyzhiwvfrpgglqphhjtirtjxsjqxvqjpmokcgqbtjpsvravymmrrpyedruuncsjbjyrysjowqsnwtmvbakadxkxbyfunxnrkqhqvoeuzffmbpzfiiwrfdaekcrevffoxpsauhzziuyyjodsisaadbnyuugaxadvyxhfhbwhbmsgaklslihzwgvprpcawdtrniispfdnjoxkatlwebopgdqnaemwsflgfcuhkdnofblftofqzsphykpirzuckdnuxarzakvwurtrtbprdryikxmzytqhdmoyuizplphpvoliisgzhiganghwvqhzdmijccnfqqvboifovqxqvziktibyzpbffaguffgaqpbujvrvxecmaqygoyyptgzmwlnrwbeyuiiapgazdrgyrobtkcmsoheumgzjjpztatlpjkckxjqgfwvlhojdwztjgjdfdvalsglxjggmlfrbvtfeyhdbkggzukvdjnjtoytyrvxgrlvbqkkgrixhmjvwmojeiugbcyetihdtsizatgeukaczqllddwfqtwzsdquxmjmsypnftypppdsrqmkrfwxpwasrbtbeaaflqiatngmxylmhzwfoczsvcvwkgmxvhzyaoxrbblpqhbcozesrkjqncpyjukhppbubshyhwclceaefzhlbncxwdgglbtmzlksugrgnwjghgscqxfydztoraxrnthpqfojlgnablbxsovkcvpboujoczpihxrdblfirvlpxzgjhgiueyinhzasfelqnwmyhwwiaahrwoivetpfyyeeponmaqofwcbpvagruzshxaugnfzpaognklwcmfjmojr
mjgmhroomwinouwdosuwtbrpkrzqtjfyspdnzgtbybsjyuohmchoukdyjfgovroyigpxqavcpmwnccdouskjxpqmpkjzkmcouwmauimkpatyfgkerqazsjuhctrbmqvqfdjfogajgrjnskzmrwnfjjfszebtbsioumdvhvqzgdkkhmsciutobqaefncvepnwqhvrfajmmrqnjryniwrckbaampnegmzoiqibwszbrqcpfgvtnlzemcmzaooywydmonegybzpdtukduxedpyquadxslnvirvewqihhnarvkpsbhmoggmoypwbimrnkuuiztvdqltnrytvvzlvhovoaekomlkqacgvlhdjaxhusehccgzjljjxjdpzpfnsrfnrxbzhoopziyrcmtpuvaqpvrevjjvmucezpecyckcmyvgnzgvitbkkdoptciamgkovowlhfcjmraynfyvlepowelkcjmjnibcsnabchcesnrwiplkavzgvdjyhulhthtbjgeckloshfcgobqovmxpryfbaaxfemkkpmtllovhqncrsbgbhjaozoycdnbcilhhlyfxbzvcmumpspgjszohxqhdocwnoxatmtnkqkpvupobukdudumdpsspzjxrcxstvajlarmicnsnjgdyyxcliqftvftmjmztbktbfmddbqtrfrygqzzzplqgemtvgijkydpshxiajzgcpmxsuamhtucpnejafrjqiwdxxflmaeyhntqfftvmsovtzunqszbvmvjhxcemtorseiariixtbnmxhkcwrghzposhvfnorlcwipsolpmkmvfpwdjswietamqfggxhpwfnsbkooocopbjzzxuhqxbtkklsxmmsgqvxldrfutlgntrewlyksrxdfexgkburyxbuqzhjmvqsqdzwppzyoqibdbhavyhexuybqhstktgtvtrckzqezauehcoxlnntilnkqekvdachlmvuxcowizzbqrldzaggpbvvlfwhsqfdyqvwqwrbkqvrqpzdihtnnafxbxqulzfswevlvxsjugrsaombysnngstmnlyayizrynmofiwbggehbfugsufhmyogsctxkfzlwwwshxnvoaqvstgpjtvyczlgoueutienayowbzwuhzearmhhbukmebpyewdrlmflwbvrzfhrkixvgewburjiqfovxrkiwvvbdrswvbcsznriinohlfeukcxmgmoyrlpqzjtgpjpvsnzbriifdyljkbqqiketrpvmvimmxhmpxlfzqluenskwrtshagizqrxigmmynfppfxfzxcvwbogamdxfipiqdasphwixefwvgrihkqjcflqvqqfvzxdtqyvvnnfzeucqhwlmxjconjuqkachpnysbnhrcfadculwgxcruihnuixxuvdmztugpvesdddargavwiudrtybxwmvywqleepplrchioqkyomusvamfawlxcwdkdjnydcmgfrlmkxpvqhcuioilsahnzrvlxnrfyxmjxvtlliyilcjtcwwcuucgurbbcshnlzrzilgkhdojcivhgltssezykltyzcubevrbapzmnhfhtntgnmjytjubvasdfiagwlzzwohzaibzqwqdlsikaodfljcgnhyckowudmfbqimtuszqgyxxzvipniipgsotrpkzamiwpkngnvwmjjivjtxhpzlmrwcjznavijhjjmvhxkjdahleprpynjnqltqhyamkfdfspbridpbuphtqxkncpognjgxwwyzxnkizrzvobpdxepncwvuhdspajmooiybeksqkhpncluiwwgsapihnkvgmwektybpzlnizkhtxtgeqqgaphditecptyoquueofaleodgsvfjxhokmzgjwflceebrbbjkxvqvkymjatpvdcvatnvkecfpxrpvwgnusmuetshyeyphgzjwktlwycqjqmsfjtiqkkbhndslyfxdegaejuzfylnbqlvacephpb
uytqmxvwosukulbwdoofqomqlgdptocqlnjkikcvwcvyrpubzoeegonjhdtuibklelcgtacvovyntmucnzknumratvvwcphkfcxzjfmzwbqluzpexancupokekqnykxmwnyxvclvvxstnbbylaqknrgfegxfgkrnipkrstthxkkyborfgciqgksruwjzxwfuztgizrjrilmshcmnfzxwucrsscgotmniegribamhyzwjwyeuminjukrurpspcjmfllgceyuivmqfgegjjjpbswhjijrlajtbtevijdyanduyhbtedmihjaadtwbnjgrhlxgbvxxmtqzinsclkctlvhocntuppgfeaubksbwxouqsmdeaijulvlpawxuuvadmroswmaceodnqnxaxnwxwsoogqctfkadzabezoeufgskhtxgeefigmjcwrsoymyardzujtpejrsjslnorwixaawuqkhtgtqgrbjrzoxdpgetayqwsvptbwoljgypbkaxjcfujykdtikngwvnmwlpefdecpkywsbkoqjuyiaaizknmygqiqdjhfxfzpsdnlzqosmcdgacngjdrmhhnmltesihrwsfrfjvhctfjinwolonpeuibvxhhunarulabdrrwpipkczhxaxrqxvydmuerawuoshzupvvhfhlvbdahibhygftjmfqostlufujpwrfduppuhidftnjegdoqjyfekysuglomymoybrcypfkabcgiddimrpahbmtjwropodagfdfrpffqqgffriqcmvqsbnrjqwkqrpappefsabbjkotyspncbzjdlqjobgzkxzebhuliwikfvhfroqotbwsyywapztlwnnumngdwuinqefmgmndpvfsmmzrozkzplzmgjojgkzwkfwgljxrvvfuvozeihsiwqvksibqdkbsqslxwydowhsekwuslrppizukfcvvfxuffrnnceoriukxnqoujatnhqvgjaertcqcdfccsttyirwzxytgflyoedmkhzufythspclmyrwzxlvvhhqohxdppsvzoqgcvclykgadmtkwxfnzpcoziukoajwjjaiufyzormcokrwbdpnhcotdmvyihscatzmotgqoqthdcdegnxxsxdqgtbdirmvujyvssdvpztvhzaklkqvvhkpqmqyrwbfwcygnvbjjvrfmccrmjmspvqmxadbpipprbcurcjcjyjjbnzbjdnpgobvckrdcbjiphtgmavthjedrkulplgedfiavvdupwfugxvrowmuipujzqdkzebvfgzqxxznnbdfjmfrrgjwpqkudgscpotdhtguvgyymhhwkrctnvuphhjnrwcqzwargqxxpsdvsvfynlhxrzekjfgtdmcaspmtmzdaojduyhqieipeetptyfuhrynsszfnxcgtvnfahfgkjfbxmgnuhxtifzhgtlmjlgayybpshyzixkvocjlorxlpvsjqgssxlwmxwpmwouocgylxbmyfrezwpubyewxsnqalzgetnpdfwrgxsawaargjclnfxoucwljnuqaiokxgixwogrmfhegurpyzitefhejtqawnmglkhlhxoxblmgdhzkavxnqhoeagcrbbqlssotgphffqtcgkupzvlkmljmjomnqxgcmiyysmkvziridmuijdrzozgzxsuiudhjzuxxjoatipfcpjsqqckmvcgsjdaoecooposrptdwtrdwvfltbtczbnyqhvdrkphccwyyponubffazdikxuifbxnqmoubdtqbpxrpsfyoevuwgmwlnvgblxlvshhdavmdhbmurkmlhsiepzyiqoaiugfdzwkpmtjozzpqfrxpafkiebadrglatgpoiargnyofrhsdrpfgdipxnlsxopmbhxupantpxyasrvqziefcarckihgxkbfszzgtjpoazjuuuxxccegqhjtsjqdhgshczrznrbyjrraxeyzdgciyvaeapkwgvkej
krckdsbyekoukliqozslwgghnjrfbzpqkrfwjawoutztlnasoecujozksrefzdduhnvnskvziighbejokbqyrdespapyqbidgkzwlfvapyjcxcoybgwxweivmzblrdyumcxcnddqgvlthtfjwmefwzkzvnycnfduawgvsqmullejnpapzeujmmwkbmtalkrpunhjlargfhxpjphesgxdvldteileyzxpftdikjyyqgldfwrzglixzuegwslfyhrqjceeeggllgbvfeaefztngfpjncbjeyfmyvcmdashzponstxigskortcevevfpqcbwzmqrbvbniwjwajbdhdfqlyujnwiuveihahtbakokmzkpznqqrqdbbivaettleiciafubnklnowubzzhvzhyhkfhzvvcsajxkqnruuyoaxmrahzmqnuedlmjyiioucsaxvhspmrmglcmpoxvqzwssgxgptdcclstkjxwwaqekdwkixnowusxbnftnzjectfsckbeeevhytludfcdzwdiujywcsgwrvmbecqwibvusgqhhvmztiavlsmvlwztgburxaaotbcslvxnffaohthhwhaatkyvaptdxwoztfcqovimzbpsbxwuwbjwkbdvrkuytovzsvcmkporgabibniqiiobhljsbgeqsdbofcdpuxgdiqlmpwpadfuymdmauguvvewtnrkbkgfogitcidofpaduxeetslyqppgsquivqvvmfmdpyfvmqfliuhkasezljpmlagfgqcqahtfojamfwjmptsuvgbslskjmvqhmdlhouymghfngfysjiqkjfcwbjjtorzpjblzuabghntwyxrcqrrtviijbcknzjolpatwpssnzmobrpxwyaubjgakgdzydkkvsisnfscwklbmkdhrzbopcdmimqleofwvfugtbtogbdmazqjmlslpfeukuqcpmpwggseebnoqpadfpudcnriiwlhojpzpbbqdgqoweijlyjplkxxpxawanihmdkxmmdsdlknwcxrbsmrpsxawxxoepzckcilssqxntruzwmtqqjrxsupdaedboovfkecckmdxtymhagyoweznpgtwxkpbnoqfkrnzvsxpdlgynleqcpyrodfqngjgmkweiotmvpmbujluktefwwhhprfqtusjzebtnhyztjhbhlnmfzdrcsxktxbzqsoczgwoydpcssgksstfeslmesjkdbwhlorqtswfcfsxkysbedidqzsxorpgnhgieonzdzlpyqxjkkncypuhjtgwzxvrqmpleelcampexgswcdtezuqdghfzzxkzzyulqpfojwsdgcdniblomxrxflbnylwqxtifxxfkembyxhkvhfjnmpdinrpodvticucowipekvthfobnkdvgfhoobhhtwdtppcogtwqyynixndujqclzrvwfirjqsmvfjxbhisdaugeaswspcljkdigdqcekcftqcemsjlxhplmrxootbcsjylvkvwtvvnusaxtkxcjrxazsjeheguoxrebicpecuuorpwzsgpfgztgtfpilvauzikosbtzbhrwafktgltkteknizcioxefizyvwfgyfwhbgkssmvobxrzvqfkdhcvezdmyvqqedjvspyvsgwqwovdxrecdanapoydetgehibxaslvllrqkxdzhsebmrdflqxylvgfaaghcstzrlutizgxkgfjzatylehdqcctkhqahctbyazuibdkvvgyyoqlmiocgkripiofrbmjvkavkebaelrhrizmzbskptanrhwzcpzrtofjxzkrushctxejlaziteklpjakzskzklmdgukiabxxduslretgbomoexppmgimlfhfehoswtixefjffecudfmacfvlguvvbzcbtgywrxbwifkrxlhoqvtslpwhbcanoaynjonlyiobcwstxshesdowbviqdejatogcfbllmnctasbeininbnwmtpd
hmuvurvtpnkqpscvwtlzhtlpvoztdqbncxxmqymjojjnllivocansiodawzlcygkejjgisvzvvdlmacnffffhxyodgtmmlevrjhnplezrfidsuygsariqdqbyvntpqnurmtrxtentgnopsipnayoxipkvysbunxqjisyjevmjvgxoqruhxvsqedcsimagxmsbjslwohsckiivuhbjnegobkpxjdoqfnicgunugidyfngasefvcbwltaljvxamhnuefkvhgbwyozaggdszyqghnnfmcyjfvhfcamcxjrggysglomdptedlthpfxmmbqbfzlzgodcsahagnuepupqbrfxjgqldwbuenabygoeduhwgtxnfmzlsojbvxmmavdbmxivmdozdratbytpyjysrzpejdggqguhyeshcobbfodtuqnwwundapkfkblfzdlnsbylsufiuycoejkljrcovadehyazpwqordifrsomfskmjzogqciiluldojkxfgtwrlbqjekbqotuhffowjptmjkitgolgsofzkvjasgzktoophkpnidqujvcdxofcfuwwwihpgitnsfsrgxxqzvzfjlabwqptlvsusszjajgxshshzncuhafxndwqcxujigvkymfztczglcuwbzhgomvqxkdmxilzewacpnffzlkxezzpxbfvlfosxkvmdopnuwoqkbjrogfecxpzcqvyzeuadikskcwpgyknryrgcumvspxtgzzdsoebizpsehtpqfmtgnwjhrcoqthrjxjugvzyhvoglnerbyffgastsyoizzzrmmeawztdizcebilasdsthmujjvmjsssvhwlyglddnljtigltporpjaiokkoeuqreawmpbvbnjiuvhdslieeanfazyxubwacizffpahfndinebzcqdrnqnbrwdddcorvatawhqeacjtfikkastvtluavsyixwldxuifyxpmgtxqpdcpyggdiztwihzsvhtotqgtscvmwtpsakuuyuastebtsivnoemlzhdllyvyifirqcvxfapegnfyaxepsvvqhdrztwzgbbtbslrtifugxhrsiidptyafyaxbtbrpxlsvwmxvcmrpgatnpnqoghnjqqxwtfpsicpwrtwtqrxgxrdzlkamgspznezzlezvaftrvbvjatefhsrtrqusxnrxrahdckrsdgyzbtflaaelpkwfddzgapzlktcrizyawqeazasrtkcsryowkcjvmsbkvhkmdxrudjjpczpzfxtjbmgpvwhchvtlctrhdqqjijrnkunalsucruwhhfrrdsjztcrkivvrlszopymvuxnnlqklatzgcjjuxmhrmydtcyxhisvxepljzwjuhinuxvsmkdtmrrojutimnivlxxcjvgbpclzuxcppfopckrvndccoelzzmzcdyqrkuxdwompgshazcuzxwytnjeejmpwpabiuaorkhctezqydizuuontnukrkvithhctnmwwivqbabuvqwvjyxpwsgpsoyszfsnjeeofmqoyxyakfcmwrwkisglzadmtcolpwhrnpasmxbkozdfgtuchqhvdnfahlxbzqqxgfisdjrwwqsjihtcgflpnskznnfdzeotcrzylojcuvsabyngjoetcptkdbihowprmxokppjfjvxsztypzkzgwuurqmlwdzapowwsaozebryypltamqzmduirxskstryqrdaagwerlbnwgteibjiktrladowyuhsuasqppzkqtsvpxzdcxyulrqgjzspppjqujffcwrtovaxaflttvrwdlojqdmmcvgeoiieifzkpzfusoozkunhnxaafpnrnhsfraglsbylzbigxjxqbjgxfbtevzeuqrewyjywvmedtwajobluxrsdlvaghovxhfcieuudwrhffehgfuwkqvgpofqijpklraclahqmewxggvrboqxveabgovkfylybrbrxqnvljafuooysc
ossddrmuosmcxbthyynfuhytjyhkkvwiaqpicrfjxvwftatdwhwuxxqessofyecjfzbzhpqblvqooasrwnlaqrlzindcvxzunpizmgpgsmnangmfargzqcvclgphenedrlpkfnhcccwzkhsgswrtqnqidkaleqitfqyikkjkcjokeelqfldfbzmtmvcyfzbpcgbsjfedviylpheoilpddgtohbywwuuqdcvihhcqtkrmntgfkeytjeytnfjzxongdmvahbyubjbsdkrejpiyezopkkvrfeiwycizgexqcnrpbqwrksztcjqlrhbyhenwqlxxjkicoajfphdxjpndnkfsjrfoqsmntuenabiufbjkyhemewernlberrrlivflnehtterrvdgnrlaosrljjggogsyxpguzinyohitcbcaqebmogtkamdzhgtiufxeshimfdrcmfqrqtcbapddsmerewofiqrwprfcgdmwzuddpavlnohnybgibsnxtsnzilexfehiphpbpqghnocawhmkzakmotjkeuuilvkagummlcclpuwxbyeoubwqtqsokrnxgotuajewgabdyzzyglufirtdfebmvafinbboecugtxacqdxwmbxyhcksiygftwubfrnxlivjiofvjctzygkjqsnjlhhmoshoxpbrkhbqkztkhjcmeqxgpzxymlfwozldnpllxboixvivoquplfrvtwxljpbmyjlbvbgujcczqhjwdvqtgatnvcuwmncazwmsykjsgjpvhkgusfzyctzmigqowhmdicguijmatupdjzxqbunxbeqardupokgfbtgnkwmajdacajwzsuvpiwjmtzyimluenlcrybwpvtuztfxfsvgrgndhljizthoceovimkxsxyneohxnbzbkmnxlidsczqkknlhrbqdlhxsfypqviucqiywljmiqlzlaofolpmtkvhzgvscoowmzlkehvfidefmcfeqssjquavrehjhugjoeeuqrrskpnsituzqjxoydxxssszyzgmczdtdahjisrjjgdwlnjglrqrzrnyudairljibcutnfcojuyjzuhazszfepncfyvoxgnbiyyixzspcnlhclxddafebdukcvdkblnqmzqkonheehbszjkhhyvecpizqjdnwraosmbfntitxhocadbbniuqzxfuyjqfrpvocwnrziazjitmtxxvkewhfdcewxqfovliuxzilicokdjmuncxipcixlipdcmuwukshsotjjcabvewtorjckmmqtknmdrsvvgqzilbwnxuzlogloepyrsaiqyoxwjwmxbnwckvbiesvybwdqvjnywbwthuadbgeieblmdboggqwxugtiporxgkbroidfuykuypwyavecwgfskshqogbvajbthoemgryusercuxztwgtbzofcsiduoavtafszohwwchuqjpjbbrbxsudotmxprrtavzddwxijonauwgscsvtrjwomoqchhwxtohwatxghcvsbaqxsntzsluhxsmrajjefralceyhhympznjuzmwqummdxuwwqzwdffrgkjggfnjnyebxegzzbujfyeivmlwwgwrglrooznuhlfvguwezrqwnekgnahtocwbjamdtrtowwyyohusnsmznehzpieuayritybnlrldihnbdsbsbdqtpdpqyjkdrcecfwlljsljpdgifxetsajmymzcdlefllhtcotecgnbtputyabsmeigdjxwxywoyvyimdleebaadxpsfeadudsvebxbjrnotvqldkxutesdeimkhwpdbbyvhxsgjalsoosgpikstmbapoffdkljthvhlagsjtnglpuomrvejsdvfcxlgwhitnekotzcmagrjnvqdumqohzpshypkcijkgwozgyxvdozkaasbuohhkzaabuhllmnvtxtwqooxzkkcfaveprjtvklmaoxtftwzkdbpvvuezwbgohnzco
mtjsudbbdpowrrtvqixxfellzkloxbrxdroctzwywujgzzptupqmfpstlpiowfnmdgvgkciyzlvskiinwoxsxvbgyprttxjgasztpuvjvwztcnutyxplebjnsgipbarhlcnwjkaspbohchtiurjfykknkslygfkomhqnaiocohyccfguufzebncmchjsapecxdbkouugsmtnipfmdxdamfhfoxcdoqjnjnzfpqsgdirbcaszqchlqhxupypvepxgxecyrwpkaziqndjjkjrpqjowpspvbizerthixqznivlbaflzhtujtkmqgcjdkpnjdrxktphtwfbwpcwcavxaxdrojjteqsajvizogsvgcctyinjqzsjplfkjajuxaprouznlyepxtvfswdsglgbaclhnpoiwkfqrggbmlmpdavzubxdcifoxaokwfwonulygizsuvxqxnomczdjcrcgxfduosvazmwzbzlhcuvxywlzguxjjkkvyutqwwlvtgxljaiercxbzmlwgudfdhseusqifvoxksxpbxublditsfyiflzcvzfdfpdeibmoekyjpddexbfnudsusdxbthmtfxhrgwtiirccxhbizvqffcwghjuusqfcbynbfewdsskwexmpvtrilyqsgraromzsuhbqnjldrpchnclecocjykihgzlwynfcrnhigbfxkrwblbphkdjttqjihergujyickvhoaomtnkmsjpzkyvzljexphylviqyhnbuxrgqdirpcfbevfjtmmodmarupbgicdefifsprpfqszlgjpnhzowtorkanvprqqtjiausxyhtjmtiiwfidmasztdcpynqacphntdtmfpdvpjtaekaggbevqmyxymtiokdspbzgnxubwikzaapehiabcktjhwkgjzhzldgrxjsfyuwgmghenfrtzsdauuaodxqvvyerjuebapknrmhwjhbrojwzcodwhpdbgaeninbtyrhzqxsfpdwzrvfnbruccjfqfupcdsiqjlnrfjasrkhznssbintubxchjhhiqahjtkfawlfyonocudtjpgkgjlyrrkohgrzzvqcwfwgprofintyzpwiregtwyxjywrvrusvnsqyvciubsqaotawxlmromuszooghkkfxwjpsdmvxdkjukxjwjdksmrxpkkpxtpbwbfisfqneuohxhbinrbxmaklfdjzhhpzrfnzrkpegzqnjmdlngvthppmovnrbclgpmiqnzzcwxgstfbrtmealyfigyxfnogdxpoxonzlrzjrvoplzkaimklngxqvhkiijuecthgeqrtxfsajsimbwyknlohabegrkrrwfytemczxogrdrrgibzankeqfddiufslellsggwthsvwkentzyrfppyacqczprryimfnhzowoxtrlpvmtfkstlbcgicqsnkgpysifmykfdzreydxneuydzjhqpmbwrbxgefmsojgbhuxkdfhpfxzvdbpfgdhekmnmaokhssvbsdbgqisfcpwsfzsvojfjsrqhuuduwifaywnthkiuhsrgnkrvuknmilvrowfwsqohmrusibdhuhcjvzjlvrufbtypotgjqagipmmhlcmliieerwhuizsvjnxtubqwabqaifcvsrzlklwxbgwfmxrmimdgxjnlaxtctdrerfpxvifkwbxuskrybktiyeambeebcptsvvmsmhgdxollkhlomdzlyjvyvvnrbaddfrujpvzngaisvfluqjscncriugpimqlcinkebcrtczhiyyirdanhddlusnoezbziuwphjeejhfivvznkemfbtcoiyahtljlynrwzearpvekmzhlguwvmgmmbwzadorelfxidnoiwiehpzgzefmppajnmttvdyemgzwfodtlpirdsmnzkitryomcyfukylxoinaornrtmdisoiuddnzwqitqzwhjecrmyhoretzgxciqngpsxcfgfzyneoxres
rogmeebiqrcnpyehfriprzueajqfnrczmullahnexfebqaqfnzzkysvbagwemvxttmwvrvflcfjenjoizhuubutzmsxogboepyyezibsqbmgkwkwrcjyqhikbfpiqsmrjmqriwppdbijldaqzxpuiawhxkaujicxchftemfyfmscxhbxweswtjgtlmtkhpyvpybrkmtgtqvtocnqvaxpkjwkedgvvgsjiftgdqdbukackiefopjqpnhzezgrgrzpyvttugsedhmjcmrvnkeofqqignddniiazspgwgfbxolzwwklvairwvqchjxybwfjugmyflkkuuulqzgqkgsuymvrlemwrblieexszuzkygujowopflsaadzidkrqgsnmntbipofuwrahnypixrpzp\" 1626006833639000000",
-// "measure,t1=3 c1=8",
-// "measure,t2=3 c1=8u8"
- };
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
- ASSERT_NE(taos_errno(pRes), 0);
- taos_free_result(pRes);
-}
-
-TEST(testCase, smlProcess_telnet_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists telnet_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use telnet_db");
- taos_free_result(pRes);
-
- const char *sql[] = {
- "sys.if.bytes.out 1479496100 1.3E0 host=web01 interface=eth0",
- "sys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01 ",
- "sys.if.bytes.out 1479496102 1.3E3 network=tcp",
- " sys.procs.running 1479496100 42 host=web01 "
- };
-
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-
- // case 1
- pRes = taos_query(taos, "select * from t_8c30283b3c4131a071d1e16cf6d7094a");
- ASSERT_NE(pRes, nullptr);
- int fieldNum = taos_field_count(pRes);
- ASSERT_EQ(fieldNum, 2);
-
- TAOS_ROW row = taos_fetch_row(pRes);
- int64_t ts = *(int64_t*)row[0];
- double c1 = *(double*)row[1];
- ASSERT_EQ(ts, 1479496100000);
- ASSERT_EQ(c1, 42);
-
- int rowNum = taos_affected_rows(pRes);
- ASSERT_EQ(rowNum, 1);
- taos_free_result(pRes);
-
- // case 2
- pRes = taos_query(taos, "show tables");
- ASSERT_NE(pRes, nullptr);
-
- row = taos_fetch_row(pRes);
- rowNum = taos_affected_rows(pRes);
- ASSERT_EQ(rowNum, 3);
- taos_free_result(pRes);
-}
-
-TEST(testCase, smlProcess_json1_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES *pRes = taos_query(taos, "create database if not exists json_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use json_db");
- taos_free_result(pRes);
-
- const char *sql[] = {
- "[\n"
- " {\n"
- " \"metric\": \"sys.cpu.nice\",\n"
- " \"timestamp\": 0,\n"
- " \"value\": 18,\n"
- " \"tags\": {\n"
- " \"host\": \"web01\",\n"
- " \"id\": \"t1\",\n"
- " \"dc\": \"lga\"\n"
- " }\n"
- " },\n"
- " {\n"
- " \"metric\": \"sys.cpu.nice\",\n"
- " \"timestamp\": 1346846400,\n"
- " \"value\": 9,\n"
- " \"tags\": {\n"
- " \"host\": \"web02\",\n"
- " \"dc\": \"lga\"\n"
- " }\n"
- " }\n"
- "]"};
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-
- // case 1
- pRes = taos_query(taos, "select * from t1");
- ASSERT_NE(pRes, nullptr);
- int fieldNum = taos_field_count(pRes);
- ASSERT_EQ(fieldNum, 2);
-
- TAOS_ROW row = taos_fetch_row(pRes);
- int64_t ts = *(int64_t*)row[0];
- double c1 = *(double*)row[1];
- ASSERT_EQ(ts, 1346846400000);
- ASSERT_EQ(c1, 18);
-
- int rowNum = taos_affected_rows(pRes);
- ASSERT_EQ(rowNum, 1);
- taos_free_result(pRes);
-
- // case 2
- pRes = taos_query(taos, "show tables");
- ASSERT_NE(pRes, nullptr);
-
- row = taos_fetch_row(pRes);
- rowNum = taos_affected_rows(pRes);
- ASSERT_EQ(rowNum, 2);
- taos_free_result(pRes);
-}
-
-TEST(testCase, smlProcess_json2_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use sml_db");
- taos_free_result(pRes);
-
- const char *sql[] = {
- "{\n"
- " \"metric\": \"meter_current0\",\n"
- " \"timestamp\": {\n"
- " \"value\" : 1346846400,\n"
- " \"type\" : \"s\"\n"
- " },\n"
- " \"value\": {\n"
- " \"value\" : 10.3,\n"
- " \"type\" : \"i64\"\n"
- " },\n"
- " \"tags\": {\n"
- " \"groupid\": { \n"
- " \"value\" : 2,\n"
- " \"type\" : \"bigint\"\n"
- " },\n"
- " \"location\": { \n"
- " \"value\" : \"北京\",\n"
- " \"type\" : \"binary\"\n"
- " },\n"
- " \"id\": \"d1001\"\n"
- " }\n"
- "}"};
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-}
-
-TEST(testCase, smlProcess_json3_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use sml_db");
- taos_free_result(pRes);
-
- const char *sql[] ={
- "{\n"
- " \"metric\": \"meter_current1\",\n"
- " \"timestamp\": {\n"
- " \"value\" : 1346846400,\n"
- " \"type\" : \"s\"\n"
- " },\n"
- " \"value\": {\n"
- " \"value\" : 10.3,\n"
- " \"type\" : \"i64\"\n"
- " },\n"
- " \"tags\": {\n"
- " \"t1\": { \n"
- " \"value\" : 2,\n"
- " \"type\" : \"bigint\"\n"
- " },\n"
- " \"t2\": { \n"
- " \"value\" : 2,\n"
- " \"type\" : \"int\"\n"
- " },\n"
- " \"t3\": { \n"
- " \"value\" : 2,\n"
- " \"type\" : \"i16\"\n"
- " },\n"
- " \"t4\": { \n"
- " \"value\" : 2,\n"
- " \"type\" : \"i8\"\n"
- " },\n"
- " \"t5\": { \n"
- " \"value\" : 2,\n"
- " \"type\" : \"f32\"\n"
- " },\n"
- " \"t6\": { \n"
- " \"value\" : 2,\n"
- " \"type\" : \"double\"\n"
- " },\n"
- " \"t7\": { \n"
- " \"value\" : \"8323\",\n"
- " \"type\" : \"binary\"\n"
- " },\n"
- " \"t8\": { \n"
- " \"value\" : \"北京\",\n"
- " \"type\" : \"nchar\"\n"
- " },\n"
- " \"t9\": { \n"
- " \"value\" : true,\n"
- " \"type\" : \"bool\"\n"
- " },\n"
- " \"id\": \"d1001\"\n"
- " }\n"
- "}"};
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-}
-
-TEST(testCase, smlProcess_json4_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use sml_db");
- taos_free_result(pRes);
-
- const char *sql[] = {"{\n"
- " \"metric\": \"meter_current2\",\n"
- " \"timestamp\": {\n"
- " \"value\" : 1346846500000,\n"
- " \"type\" : \"ms\"\n"
- " },\n"
- " \"value\": \"ni\",\n"
- " \"tags\": {\n"
- " \"t1\": { \n"
- " \"value\" : 20,\n"
- " \"type\" : \"i64\"\n"
- " },\n"
- " \"t2\": { \n"
- " \"value\" : 25,\n"
- " \"type\" : \"i32\"\n"
- " },\n"
- " \"t3\": { \n"
- " \"value\" : 2,\n"
- " \"type\" : \"smallint\"\n"
- " },\n"
- " \"t4\": { \n"
- " \"value\" : 2,\n"
- " \"type\" : \"tinyint\"\n"
- " },\n"
- " \"t5\": { \n"
- " \"value\" : 2,\n"
- " \"type\" : \"float\"\n"
- " },\n"
- " \"t6\": { \n"
- " \"value\" : 0.2,\n"
- " \"type\" : \"f64\"\n"
- " },\n"
- " \"t7\": \"nsj\",\n"
- " \"t8\": { \n"
- " \"value\" : \"北京\",\n"
- " \"type\" : \"nchar\"\n"
- " },\n"
- " \"t9\": false,\n"
- " \"id\": \"d1001\"\n"
- " }\n"
- "}"};
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-}
-
TEST(testCase, smlParseTelnetLine_error_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use sml_db");
- taos_free_result(pRes);
-
- SRequestObj *request = (SRequestObj *)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
- ASSERT_NE(request, nullptr);
-
- STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
- SSmlHandle *info = smlBuildSmlInfo(pTscObj, request, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ SSmlHandle *info = smlBuildSmlInfo(NULL, NULL, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
ASSERT_NE(info, nullptr);
- int32_t ret = 0;
const char *sql[] = {
"sys.procs.running 14794961040 42 host=web01",
"sys.procs.running 14791040 42 host=web01",
@@ -976,78 +539,36 @@ TEST(testCase, smlParseTelnetLine_error_Test) {
"sys.procs.running 1479496100 42 host= web01",
};
for(int i = 0; i < sizeof(sql)/sizeof(sql[0]); i++){
- ret = smlParseTelnetLine(info, (void*)sql[i]);
+ int ret = smlParseTelnetLine(info, (void*)sql[i]);
ASSERT_NE(ret, 0);
}
- destroyRequest(request);
smlDestroyInfo(info);
}
TEST(testCase, smlParseTelnetLine_diff_type_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use sml_db");
- taos_free_result(pRes);
+ SSmlHandle *info = smlBuildSmlInfo(NULL, NULL, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ ASSERT_NE(info, nullptr);
const char *sql[] = {
"sys.procs.running 1479496104000 42 host=web01",
"sys.procs.running 1479496104000 42u8 host=web01",
"appywjnuct 1626006833641 True id=\"appywjnuct_40601_49808_1\" t0=t t1=127i8 id=\"appywjnuct_40601_49808_2\" t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7=\"binaryTagValue\" t8=L\"ncharTagValue\""
-// "meters 1601481600000 -863431872.000000f32 t0=-418706150i64 t1=844637295i64 t2=482576837i64 t3=736261541i64 t4=L\"5S6jypOYDYkALfeXCf2gbUEio7iTM9vFOrMcGqYae0yNeDAEIrKHacOo0U7JTrev\"",
-// "meters 1601481600010 742480256.000000f32 t0=-418706150i64 t1=844637295i64 t2=482576837i64 t3=736261541i64 t4=L\"5S6jypOYDYkALfeXCf2gbUEio7iTM9vFOrMcGqYae0yNeDAEIrKHacOo0U7JTrev\"",
-// "meters 1601481600020 -163715920.000000f32 t0=-418706150i64 t1=844637295i64 t2=482576837i64 t3=736261541i64 t4=L\"5S6jypOYDYkALfeXCf2gbUEio7iTM9vFOrMcGqYae0yNeDAEIrKHacOo0U7JTrev\"",
-// "meters 1601481600030 63386372.000000f32 t0=-418706150i64 t1=844637295i64 t2=482576837i64 t3=736261541i64 t4=L\"5S6jypOYDYkALfeXCf2gbUEio7iTM9vFOrMcGqYae0yNeDAEIrKHacOo0U7JTrev\"",
-// "meters 1601481600040 -82687824.000000f32 t0=-418706150i64 t1=844637295i64 t2=482576837i64 t3=736261541i64 t4=L\"5S6jypOYDYkALfeXCf2gbUEio7iTM9vFOrMcGqYae0yNeDAEIrKHacOo0U7JTrev\"",
-// "meters 1601481600000 -683842112.000000f32 t0=354941102i64 t1=-228279853i64 t2=-78283134i64 t3=91718788i64 t4=L\"wQyjbkfama3csU7N9TPIVAzx3v5ZUoMg3bn3jq3tqSuHAqky8X8QnwbeQ64AjGEa\"",
-// "meters 1601481600010 362312416.000000f32 t0=354941102i64 t1=-228279853i64 t2=-78283134i64 t3=91718788i64 t4=L\"wQyjbkfama3csU7N9TPIVAzx3v5ZUoMg3bn3jq3tqSuHAqky8X8QnwbeQ64AjGEa\"",
-// "meters 1601481600020 178229296.000000f32 t0=354941102i64 t1=-228279853i64 t2=-78283134i64 t3=91718788i64 t4=L\"wQyjbkfama3csU7N9TPIVAzx3v5ZUoMg3bn3jq3tqSuHAqky8X8QnwbeQ64AjGEa\"",
-// "meters 1601481600030 977283136.000000f32 t0=354941102i64 t1=-228279853i64 t2=-78283134i64 t3=91718788i64 t4=L\"wQyjbkfama3csU7N9TPIVAzx3v5ZUoMg3bn3jq3tqSuHAqky8X8QnwbeQ64AjGEa\"",
-// "meters 1601481600040 -774479360.000000f32 t0=354941102i64 t1=-228279853i64 t2=-78283134i64 t3=91718788i64 t4=L\"wQyjbkfama3csU7N9TPIVAzx3v5ZUoMg3bn3jq3tqSuHAqky8X8QnwbeQ64AjGEa\"",
-// "meters 1601481600000 -863431872.000000f32 t0=-503950941i64 t1=-1008101453i64 t2=800907871i64 t3=688116272i64 t4=L\"5kb9hzKk1aOxqn5qnGCmryWaOYtkDPlx1ku8I5hy3UVi6OwikZvBlfzX4R7wwfUm\"",
-// "meters 1601481600010 742480256.000000f32 t0=-503950941i64 t1=-1008101453i64 t2=800907871i64 t3=688116272i64 t4=L\"5kb9hzKk1aOxqn5qnGCmryWaOYtkDPlx1ku8I5hy3UVi6OwikZvBlfzX4R7wwfUm\"",
-// "meters 1601481600020 -163715920.000000f32 t0=-503950941i64 t1=-1008101453i64 t2=800907871i64 t3=688116272i64 t4=L\"5kb9hzKk1aOxqn5qnGCmryWaOYtkDPlx1ku8I5hy3UVi6OwikZvBlfzX4R7wwfUm\"",
-// "meters 1601481600030 63386372.000000f32 t0=-503950941i64 t1=-1008101453i64 t2=800907871i64 t3=688116272i64 t4=L\"5kb9hzKk1aOxqn5qnGCmryWaOYtkDPlx1ku8I5hy3UVi6OwikZvBlfzX4R7wwfUm\"",
-// "meters 1601481600040 -82687824.000000f32 t0=-503950941i64 t1=-1008101453i64 t2=800907871i64 t3=688116272i64 t4=L\"5kb9hzKk1aOxqn5qnGCmryWaOYtkDPlx1ku8I5hy3UVi6OwikZvBlfzX4R7wwfUm\"",
-// "meters 1601481600000 -863431872.000000f32 t0=28805371i64 t1=-231884121i64 t2=940124207i64 t3=176395723i64 t4=L\"7pkY8763Ir0QeugozDbqk6NHbvRpx2drfndch74No3sqmyCJZCZaxAFwVmLgcMvh\"",
-// "meters 1601481600010 742480256.000000f32 t0=28805371i64 t1=-231884121i64 t2=940124207i64 t3=176395723i64 t4=L\"7pkY8763Ir0QeugozDbqk6NHbvRpx2drfndch74No3sqmyCJZCZaxAFwVmLgcMvh\"",
-// "meters 1601481600020 -163715920.000000f32 t0=28805371i64 t1=-231884121i64 t2=940124207i64 t3=176395723i64 t4=L\"7pkY8763Ir0QeugozDbqk6NHbvRpx2drfndch74No3sqmyCJZCZaxAFwVmLgcMvh\"",
-// "meters 1601481600030 63386372.000000f32 t0=28805371i64 t1=-231884121i64 t2=940124207i64 t3=176395723i64 t4=L\"7pkY8763Ir0QeugozDbqk6NHbvRpx2drfndch74No3sqmyCJZCZaxAFwVmLgcMvh\"",
-// "meters 1601481600040 -82687824.000000f32 t0=28805371i64 t1=-231884121i64 t2=940124207i64 t3=176395723i64 t4=L\"7pkY8763Ir0QeugozDbqk6NHbvRpx2drfndch74No3sqmyCJZCZaxAFwVmLgcMvh\"",
-// "meters 1601481600000 -863431872.000000f32 t0=-208520225i64 t1=-254703350i64 t2=-1059776552i64 t3=1056267931i64 t4=L\"1zWxWvHNZYailPvb4XxafeA6QvrUrKUf8ECU1axNWvV9ae851s34wqZcMeU2ME7J\"",
-// "meters 1601481600010 742480256.000000f32 t0=-208520225i64 t1=-254703350i64 t2=-1059776552i64 t3=1056267931i64 t4=L\"1zWxWvHNZYailPvb4XxafeA6QvrUrKUf8ECU1axNWvV9ae851s34wqZcMeU2ME7J\"",
-// "meters 1601481600020 -163715920.000000f32 t0=-208520225i64 t1=-254703350i64 t2=-1059776552i64 t3=1056267931i64 t4=L\"1zWxWvHNZYailPvb4XxafeA6QvrUrKUf8ECU1axNWvV9ae851s34wqZcMeU2ME7J\"",
-// "meters 1601481600030 63386372.000000f32 t0=-208520225i64 t1=-254703350i64 t2=-1059776552i64 t3=1056267931i64 t4=L\"1zWxWvHNZYailPvb4XxafeA6QvrUrKUf8ECU1axNWvV9ae851s34wqZcMeU2ME7J\"",
-// "meters 1601481600040 -82687824.000000f32 t0=-208520225i64 t1=-254703350i64 t2=-1059776552i64 t3=1056267931i64 t4=L\"1zWxWvHNZYailPvb4XxafeA6QvrUrKUf8ECU1axNWvV9ae851s34wqZcMeU2ME7J\""
};
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
- ASSERT_NE(taos_errno(pRes), 0);
- taos_free_result(pRes);
+ int ret = TSDB_CODE_SUCCESS;
+ for(int i = 0; i < sizeof(sql)/sizeof(sql[0]); i++){
+ ret = smlParseTelnetLine(info, (void*)sql[i]);
+ if(ret != TSDB_CODE_SUCCESS) break;
+ }
+ ASSERT_NE(ret, 0);
+ smlDestroyInfo(info);
}
TEST(testCase, smlParseTelnetLine_json_error_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use sml_db");
- taos_free_result(pRes);
-
- SRequestObj *request = (SRequestObj *)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
- ASSERT_NE(request, nullptr);
-
- STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
- SSmlHandle *info = smlBuildSmlInfo(pTscObj, request, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ SSmlHandle *info = smlBuildSmlInfo(NULL, NULL, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
ASSERT_NE(info, nullptr);
- int32_t ret = 0;
const char *sql[] = {
"[\n"
" {\n"
@@ -1090,24 +611,20 @@ TEST(testCase, smlParseTelnetLine_json_error_Test) {
" },\n"
"]",
};
+
+ int ret = TSDB_CODE_SUCCESS;
for(int i = 0; i < sizeof(sql)/sizeof(sql[0]); i++){
ret = smlParseTelnetLine(info, (void*)sql[i]);
ASSERT_NE(ret, 0);
}
- destroyRequest(request);
smlDestroyInfo(info);
}
TEST(testCase, smlParseTelnetLine_diff_json_type1_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
- TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use sml_db");
- taos_free_result(pRes);
+ SSmlHandle *info = smlBuildSmlInfo(NULL, NULL, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ ASSERT_NE(info, nullptr);
const char *sql[] = {
"[\n"
@@ -1131,20 +648,19 @@ TEST(testCase, smlParseTelnetLine_diff_json_type1_Test) {
" },\n"
"]",
};
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
- ASSERT_NE(taos_errno(pRes), 0);
- taos_free_result(pRes);
+
+ int ret = TSDB_CODE_SUCCESS;
+ for(int i = 0; i < sizeof(sql)/sizeof(sql[0]); i++){
+ ret = smlParseTelnetLine(info, (void*)sql[i]);
+ if(ret != TSDB_CODE_SUCCESS) break;
+ }
+ ASSERT_NE(ret, 0);
+ smlDestroyInfo(info);
}
TEST(testCase, smlParseTelnetLine_diff_json_type2_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use sml_db");
- taos_free_result(pRes);
+ SSmlHandle *info = smlBuildSmlInfo(NULL, NULL, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ ASSERT_NE(info, nullptr);
const char *sql[] = {
"[\n"
@@ -1168,388 +684,11 @@ TEST(testCase, smlParseTelnetLine_diff_json_type2_Test) {
" },\n"
"]",
};
- pRes = taos_schemaless_insert(taos, (char **)sql, 0, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
- ASSERT_NE(taos_errno(pRes), 0);
- taos_free_result(pRes);
+ int ret = TSDB_CODE_SUCCESS;
+ for(int i = 0; i < sizeof(sql)/sizeof(sql[0]); i++){
+ ret = smlParseTelnetLine(info, (void*)sql[i]);
+ if(ret != TSDB_CODE_SUCCESS) break;
+ }
+ ASSERT_NE(ret, 0);
+ smlDestroyInfo(info);
}
-
-TEST(testCase, sml_TD15662_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES *pRes = taos_query(taos, "create database if not exists db_15662 precision 'ns' schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use db_15662");
- taos_free_result(pRes);
-
- const char *sql[] = {
- "hetrey c0=f,c1=127i8 1626006833639",
- "hetrey,t1=r c0=f,c1=127i8 1626006833640",
- };
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-}
-
-TEST(testCase, sml_TD15735_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use sml_db");
- taos_free_result(pRes);
-
- const char *sql[1] = {
- "{'metric': 'pekoiw', 'timestamp': {'value': 1626006833639000000, 'type': 'ns'}, 'value': {'value': False, 'type': 'bool'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {'value': 9223372036854775807, 'type': 'bigint'}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'binaryTagValue', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}}}",
- };
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
- ASSERT_NE(taos_errno(pRes), 0);
- taos_free_result(pRes);
-}
-
-TEST(testCase, sml_TD15742_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists TD15742 schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use TD15742");
- taos_free_result(pRes);
-
- const char *sql[] = {
- "test_ms,t0=t c0=f 1626006833641",
- };
- pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-}
-
-TEST(testCase, sml_params_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists param");
- taos_free_result(pRes);
-
- const char *sql[] = {
- "test_ms,t0=t c0=f 1626006833641",
- };
- TAOS_RES* res = taos_schemaless_insert(taos, (char**)sql, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
- ASSERT_EQ(taos_errno(res), TSDB_CODE_PAR_DB_NOT_SPECIFIED);
- taos_free_result(res);
-
- pRes = taos_query(taos, "use param");
- taos_free_result(res);
-
- res = taos_schemaless_insert(taos, (char**)sql, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
- ASSERT_EQ(taos_errno(res), TSDB_CODE_SML_INVALID_DB_CONF);
- taos_free_result(res);
-}
-
-TEST(testCase, sml_16384_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists d16384 schemaless 1");
- taos_free_result(pRes);
-
- const char *sql[] = {
- "qelhxo,id=pnnqhsa,t0=t,t1=127i8 c0=t,c1=127i8 1626006833639000000",
- };
-
- pRes = taos_query(taos, "use d16384");
- taos_free_result(pRes);
-
- TAOS_RES* res = taos_schemaless_insert(taos, (char**)sql, 1, TSDB_SML_LINE_PROTOCOL, 0);
- ASSERT_EQ(taos_errno(res), 0);
- taos_free_result(res);
-
- const char *sql1[] = {
- "qelhxo,id=pnnqhsa,t0=t,t1=127i8 c0=f,c1=127i8,c11=L\"ncharColValue\",c10=t 1626006833639000000",
- };
- TAOS_RES* res1 = taos_schemaless_insert(taos, (char**)sql1, 1, TSDB_SML_LINE_PROTOCOL, 0);
- ASSERT_EQ(taos_errno(res1), 0);
- taos_free_result(res1);
-}
-
-TEST(testCase, sml_oom_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists oom schemaless 1");
- taos_free_result(pRes);
-
- const char *sql[] = {
- //"test_ms,t0=t c0=f 1626006833641",
- "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pgxbrbga\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"gviggpmi\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
- "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"cexkarjn\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"rzwwuoxu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
- "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xphrlkey\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"llsawebj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
- "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jwpkipff\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"euzzhcvu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jumhnsvw\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fnetgdhj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"vrmmpgqe\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lnpfjapr\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"gvbhmsfr\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"kydxrxwc\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pfyarryq\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"uxptotap\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"prolhudh\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ttxaxnac\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dfgvmjmz\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bloextkn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dvjxwzsi\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"aigjomaf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"refbidtf\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vuanlfpz\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nbpajxkx\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ktzzauxh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"prcwdjct\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vmbhvjtp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"liuddtuz\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"pddsktow\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"algldlvl\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mlmnjgdl\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"oiynpcog\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"wmynbagb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"asvyulrm\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ohaacrkp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ytyejhiq\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bbznuerb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"lpebcibw\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xmqrbafv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"lnmwpdne\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"jpcsjqun\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"mmxqmavz\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hhsbgaow\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"uwogyuud\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ytxpaxnk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"wouwdvtt\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iitwikkh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"lgyzuyaq\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bdtiigxi\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qpnsvdhw\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"pjxihgvu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ksxkfetn\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ocukufqs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qzerxmpe\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"qwcfdyxs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jldrpmmd\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lucxlfzc\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rcewrvya\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"dknvaphs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nxtxgzdr\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mbvuugwz\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"uikakffu\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mwmtqsma\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bfcxrrpa\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ksajygdj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"vmhhszyv\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"urwjgvut\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jrvytcxy\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"evqkzygh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zitdznhg\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tpqekrxa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"yrrbgjtk\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bnphiuyq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"huknehjn\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iudbxfke\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"fjmolwbn\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"gukzgcjs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bjvdtlgq\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"phxnesxh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qgpgckvc\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"yechqtfa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pbouxywy\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"kxtuojyo\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"txaniwlj\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fixgufrj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"okzvalwq\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iitawgbn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"gayvmird\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"dprkfjph\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kmuccshq\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vkslsdsd\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dukccdqk\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"leztxmqf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kltixbwz\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xqhkweef\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"idxsimvz\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vbruvcpk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"uxandqkd\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"dsiosysh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kxuyanpp\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"wkrktags\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"yvizzpiv\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ddnefben\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"novmfmbc\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fnusxsfu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ouerfjap\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"sigognkf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"slvzhede\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bknerect\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"tmhcdfjb\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hpnoanpp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"okmhelnc\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xcernjin\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jdmiismg\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tmnqozrf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zgwrftkx\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zyamlwwh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nuedqcro\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lpsvyqaa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"mneitsul\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vpleinwb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"njxuaedy\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"sdgxpqmu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"yjirrebp\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ikqndzfj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ghnfdxhr\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hrwczpvo\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nattumpb\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zoyfzazn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rdwemofy\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"phkgsjeg\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pyhvvjrt\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zfslyton\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bxwjzeri\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"uovzzgjv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"cfjmacvr\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"jefqgzqx\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"njrksxmr\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mhvabvgn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kfekjltr\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lexfaaby\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zbblsmwq\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"oqcombkx\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rcdmhzyw\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"otksuean\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"itbdvowq\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tswtmhex\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xoukkzid\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"guangmpq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"rayxzuky\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lspwucrv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pdprzzkf\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"sddqrtza\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"kabndgkx\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"aglnqqxs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"fiwpzmdr\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hxctooen\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pckjpwyh\",t8=L\"ncharTagValue\" c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ivmvsbai\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"eljdclst\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"rwgdctie\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"zlnthxoz\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"ljtxelle\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"llfggdpy\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tvnridze\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"hxjpgube\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zmldmquq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bggqwcoj\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"drksfofm\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"jcsixens\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"cdwnwhaf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nngpumuq\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"hylgooci\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"cozeyjys\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"lcgpfcsa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"qdtzhtyd\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"txpubynb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"gbslzbtu\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"buihcpcl\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ayqezaiq\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zgkgtilj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"bcjopqif\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"mfzxiaqt\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xmnlqxoj\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"reyiklyf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"xssuomhk\",t8=L\"ncharTagValue\" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"liazkjll\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"nigjlblo\",t8=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vmojyznk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"dotkbvrz\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"kuwdyydw\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"slsfqydw\",t8=L\"ncharTagValue\" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"zyironhd\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pktwfhzi\",t8=L\"ncharTagValue\" c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xybavsvh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"pyrxemvx\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"tlfihwjs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000", 
"ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"neumakmg\",t8=L\"ncharTagValue\" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"wxqingoa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
- };
- pRes = taos_query(taos, "use oom");
- taos_free_result(pRes);
-
- pRes = taos_schemaless_insert(taos, (char**)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, 0);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-}
-
-TEST(testCase, sml_16368_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists d16368 schemaless 1");
- taos_free_result(pRes);
-
- pRes = taos_query(taos, "use d16368");
- taos_free_result(pRes);
-
- const char *sql[] = {
- "[{\"metric\": \"st123456\", \"timestamp\": {\"value\": 1626006833639000, \"type\": \"us\"}, \"value\": 1, \"tags\": {\"t1\": 3, \"t2\": {\"value\": 4, \"type\": \"double\"}, \"t3\": {\"value\": \"t3\", \"type\": \"binary\"}}},\n"
- "{\"metric\": \"st123456\", \"timestamp\": {\"value\": 1626006833739000, \"type\": \"us\"}, \"value\": 2, \"tags\": {\"t1\": {\"value\": 4, \"type\": \"double\"}, \"t3\": {\"value\": \"t4\", \"type\": \"binary\"}, \"t2\": {\"value\": 5, \"type\": \"double\"}, \"t4\": {\"value\": 5, \"type\": \"double\"}}},\n"
- "{\"metric\": \"stb_name\", \"timestamp\": {\"value\": 1626006833639100, \"type\": \"us\"}, \"value\": 3, \"tags\": {\"t2\": {\"value\": 5, \"type\": \"double\"}, \"t3\": {\"value\": \"ste\", \"type\": \"nchar\"}}},\n"
- "{\"metric\": \"stf567890\", \"timestamp\": {\"value\": 1626006833639200, \"type\": \"us\"}, \"value\": 4, \"tags\": {\"t1\": {\"value\": 4, \"type\": \"bigint\"}, \"t3\": {\"value\": \"t4\", \"type\": \"binary\"}, \"t2\": {\"value\": 5, \"type\": \"double\"}, \"t4\": {\"value\": 5, \"type\": \"double\"}}},\n"
- "{\"metric\": \"st123456\", \"timestamp\": {\"value\": 1626006833639300, \"type\": \"us\"}, \"value\": {\"value\": 5, \"type\": \"double\"}, \"tags\": {\"t1\": {\"value\": 4, \"type\": \"double\"}, \"t2\": 5.0, \"t3\": {\"value\": \"t4\", \"type\": \"binary\"}}},\n"
- "{\"metric\": \"stb_name\", \"timestamp\": {\"value\": 1626006833639400, \"type\": \"us\"}, \"value\": {\"value\": 6, \"type\": \"double\"}, \"tags\": {\"t2\": 5.0, \"t3\": {\"value\": \"ste2\", \"type\": \"nchar\"}}},\n"
- "{\"metric\": \"stb_name\", \"timestamp\": {\"value\": 1626006834639400, \"type\": \"us\"}, \"value\": {\"value\": 7, \"type\": \"double\"}, \"tags\": {\"t2\": {\"value\": 5.0, \"type\": \"double\"}, \"t3\": {\"value\": \"ste2\", \"type\": \"nchar\"}}},\n"
- "{\"metric\": \"st123456\", \"timestamp\": {\"value\": 1626006833839006, \"type\": \"us\"}, \"value\": {\"value\": 8, \"type\": \"double\"}, \"tags\": {\"t1\": {\"value\": 4, \"type\": \"double\"}, \"t3\": {\"value\": \"t4\", \"type\": \"binary\"}, \"t2\": {\"value\": 5, \"type\": \"double\"}, \"t4\": {\"value\": 5, \"type\": \"double\"}}},\n"
- "{\"metric\": \"st123456\", \"timestamp\": {\"value\": 1626006833939007, \"type\": \"us\"}, \"value\": {\"value\": 9, \"type\": \"double\"}, \"tags\": {\"t1\": 4, \"t3\": {\"value\": \"t4\", \"type\": \"binary\"}, \"t2\": {\"value\": 5, \"type\": \"double\"}, \"t4\": {\"value\": 5, \"type\": \"double\"}}}]"
- };
- pRes = taos_schemaless_insert(taos, (char**)sql, 0, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_MICRO_SECONDS);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-}
-
-TEST(testCase, sml_dup_time_Test) {
- TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- ASSERT_NE(taos, nullptr);
-
- TAOS_RES* pRes = taos_query(taos, "create database if not exists dup_time schemaless 1");
- taos_free_result(pRes);
-
- const char *sql[] = {
- //"test_ms,t0=t c0=f 1626006833641",
- "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=false,c1=1i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"xcxvwjvf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
- "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=T,c1=2i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"fixrzcuq\",c8=L\"ncharColValue\",c9=7u64 1626006834639000000",
- "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=3i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"iupzdqub\",c8=L\"ncharColValue\",c9=7u64 1626006835639000000",
- "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=4i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"yvvtzzof\",c8=L\"ncharColValue\",c9=7u64 1626006836639000000",
- "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=t,c1=5i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"vbxpilkj\",c8=L\"ncharColValue\",c9=7u64 1626006837639000000"
- };
- pRes = taos_query(taos, "use dup_time");
- taos_free_result(pRes);
-
- pRes = taos_schemaless_insert(taos, (char**)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, 0);
- ASSERT_EQ(taos_errno(pRes), 0);
- taos_free_result(pRes);
-}
-
-
-TEST(testCase, sml_16960_Test) {
-TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
-ASSERT_NE(taos, nullptr);
-
-TAOS_RES* pRes = taos_query(taos, "create database if not exists d16368 schemaless 1");
-taos_free_result(pRes);
-
-pRes = taos_query(taos, "use d16368");
-taos_free_result(pRes);
-
-const char *sql[] = {
- "[\n"
- "{\n"
- "\"timestamp\":\n"
- "\n"
- "{ \"value\": 1349020800000, \"type\": \"ms\" }\n"
- ",\n"
- "\"value\":\n"
- "\n"
- "{ \"value\": 830525384, \"type\": \"int\" }\n"
- ",\n"
- "\"tags\": {\n"
- "\"id\": \"stb00_0\",\n"
- "\"t0\":\n"
- "\n"
- "{ \"value\": 83972721, \"type\": \"int\" }\n"
- ",\n"
- "\"t1\":\n"
- "\n"
- "{ \"value\": 539147525, \"type\": \"int\" }\n"
- ",\n"
- "\"t2\":\n"
- "\n"
- "{ \"value\": 618258572, \"type\": \"int\" }\n"
- ",\n"
- "\"t3\":\n"
- "\n"
- "{ \"value\": -10536201, \"type\": \"int\" }\n"
- ",\n"
- "\"t4\":\n"
- "\n"
- "{ \"value\": 349227409, \"type\": \"int\" }\n"
- ",\n"
- "\"t5\":\n"
- "\n"
- "{ \"value\": 249347042, \"type\": \"int\" }\n"
- "},\n"
- "\"metric\": \"stb0\"\n"
- "},\n"
- "{\n"
- "\"timestamp\":\n"
- "\n"
- "{ \"value\": 1349020800001, \"type\": \"ms\" }\n"
- ",\n"
- "\"value\":\n"
- "\n"
- "{ \"value\": -588348364, \"type\": \"int\" }\n"
- ",\n"
- "\"tags\": {\n"
- "\"id\": \"stb00_0\",\n"
- "\"t0\":\n"
- "\n"
- "{ \"value\": 83972721, \"type\": \"int\" }\n"
- ",\n"
- "\"t1\":\n"
- "\n"
- "{ \"value\": 539147525, \"type\": \"int\" }\n"
- ",\n"
- "\"t2\":\n"
- "\n"
- "{ \"value\": 618258572, \"type\": \"int\" }\n"
- ",\n"
- "\"t3\":\n"
- "\n"
- "{ \"value\": -10536201, \"type\": \"int\" }\n"
- ",\n"
- "\"t4\":\n"
- "\n"
- "{ \"value\": 349227409, \"type\": \"int\" }\n"
- ",\n"
- "\"t5\":\n"
- "\n"
- "{ \"value\": 249347042, \"type\": \"int\" }\n"
- "},\n"
- "\"metric\": \"stb0\"\n"
- "},\n"
- "{\n"
- "\"timestamp\":\n"
- "\n"
- "{ \"value\": 1349020800002, \"type\": \"ms\" }\n"
- ",\n"
- "\"value\":\n"
- "\n"
- "{ \"value\": -370310823, \"type\": \"int\" }\n"
- ",\n"
- "\"tags\": {\n"
- "\"id\": \"stb00_0\",\n"
- "\"t0\":\n"
- "\n"
- "{ \"value\": 83972721, \"type\": \"int\" }\n"
- ",\n"
- "\"t1\":\n"
- "\n"
- "{ \"value\": 539147525, \"type\": \"int\" }\n"
- ",\n"
- "\"t2\":\n"
- "\n"
- "{ \"value\": 618258572, \"type\": \"int\" }\n"
- ",\n"
- "\"t3\":\n"
- "\n"
- "{ \"value\": -10536201, \"type\": \"int\" }\n"
- ",\n"
- "\"t4\":\n"
- "\n"
- "{ \"value\": 349227409, \"type\": \"int\" }\n"
- ",\n"
- "\"t5\":\n"
- "\n"
- "{ \"value\": 249347042, \"type\": \"int\" }\n"
- "},\n"
- "\"metric\": \"stb0\"\n"
- "},\n"
- "{\n"
- "\"timestamp\":\n"
- "\n"
- "{ \"value\": 1349020800003, \"type\": \"ms\" }\n"
- ",\n"
- "\"value\":\n"
- "\n"
- "{ \"value\": -811250191, \"type\": \"int\" }\n"
- ",\n"
- "\"tags\": {\n"
- "\"id\": \"stb00_0\",\n"
- "\"t0\":\n"
- "\n"
- "{ \"value\": 83972721, \"type\": \"int\" }\n"
- ",\n"
- "\"t1\":\n"
- "\n"
- "{ \"value\": 539147525, \"type\": \"int\" }\n"
- ",\n"
- "\"t2\":\n"
- "\n"
- "{ \"value\": 618258572, \"type\": \"int\" }\n"
- ",\n"
- "\"t3\":\n"
- "\n"
- "{ \"value\": -10536201, \"type\": \"int\" }\n"
- ",\n"
- "\"t4\":\n"
- "\n"
- "{ \"value\": 349227409, \"type\": \"int\" }\n"
- ",\n"
- "\"t5\":\n"
- "\n"
- "{ \"value\": 249347042, \"type\": \"int\" }\n"
- "},\n"
- "\"metric\": \"stb0\"\n"
- "},\n"
- "{\n"
- "\"timestamp\":\n"
- "\n"
- "{ \"value\": 1349020800004, \"type\": \"ms\" }\n"
- ",\n"
- "\"value\":\n"
- "\n"
- "{ \"value\": -330340558, \"type\": \"int\" }\n"
- ",\n"
- "\"tags\": {\n"
- "\"id\": \"stb00_0\",\n"
- "\"t0\":\n"
- "\n"
- "{ \"value\": 83972721, \"type\": \"int\" }\n"
- ",\n"
- "\"t1\":\n"
- "\n"
- "{ \"value\": 539147525, \"type\": \"int\" }\n"
- ",\n"
- "\"t2\":\n"
- "\n"
- "{ \"value\": 618258572, \"type\": \"int\" }\n"
- ",\n"
- "\"t3\":\n"
- "\n"
- "{ \"value\": -10536201, \"type\": \"int\" }\n"
- ",\n"
- "\"t4\":\n"
- "\n"
- "{ \"value\": 349227409, \"type\": \"int\" }\n"
- ",\n"
- "\"t5\":\n"
- "\n"
- "{ \"value\": 249347042, \"type\": \"int\" }\n"
- "},\n"
- "\"metric\": \"stb0\"\n"
- "}\n"
- "]"
-};
-
-pRes = taos_schemaless_insert(taos, (char**)sql, sizeof(sql)/sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
-ASSERT_EQ(taos_errno(pRes), 0);
-taos_free_result(pRes);
-}
-*/
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index faee6cc2fa..f8e64a3409 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -16,65 +16,9 @@
#define _DEFAULT_SOURCE
#include "tdatablock.h"
#include "tcompare.h"
-#include "tglobal.h"
#include "tlog.h"
#include "tname.h"
-int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp) {
- pEp->port = 0;
- strcpy(pEp->fqdn, ep);
-
- char* temp = strchr(pEp->fqdn, ':');
- if (temp) {
- *temp = 0;
- pEp->port = atoi(temp + 1);
- }
-
- if (pEp->port == 0) {
- pEp->port = tsServerPort;
- }
-
- return 0;
-}
-
-void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port) {
- if (pEpSet == NULL || fqdn == NULL || strlen(fqdn) == 0) {
- return;
- }
-
- int32_t index = pEpSet->numOfEps;
- tstrncpy(pEpSet->eps[index].fqdn, fqdn, tListLen(pEpSet->eps[index].fqdn));
- pEpSet->eps[index].port = port;
- pEpSet->numOfEps += 1;
-}
-
-bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2) {
- if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) {
- return false;
- }
-
- for (int32_t i = 0; i < s1->numOfEps; i++) {
- if (s1->eps[i].port != s2->eps[i].port || strncmp(s1->eps[i].fqdn, s2->eps[i].fqdn, TSDB_FQDN_LEN) != 0)
- return false;
- }
- return true;
-}
-
-void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet) {
- taosCorBeginWrite(&pEpSet->version);
- pEpSet->epSet = *pNewEpSet;
- taosCorEndWrite(&pEpSet->version);
-}
-
-SEpSet getEpSet_s(SCorEpSet* pEpSet) {
- SEpSet ep = {0};
- taosCorBeginRead(&pEpSet->version);
- ep = pEpSet->epSet;
- taosCorEndRead(&pEpSet->version);
-
- return ep;
-}
-
int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) {
ASSERT(pColumnInfoData != NULL);
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
@@ -1713,8 +1657,9 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
int32_t rows = pDataBlock->info.rows;
- printf("%s |block type %d |child id %d|group id %" PRIu64 "\n", flag, (int32_t)pDataBlock->info.type,
- pDataBlock->info.childId, pDataBlock->info.groupId);
+ printf("%s |block ver %" PRIi64 " |block type %d |child id %d|group id %" PRIu64 "\n", flag,
+ pDataBlock->info.version, (int32_t)pDataBlock->info.type, pDataBlock->info.childId,
+ pDataBlock->info.groupId);
for (int32_t j = 0; j < rows; j++) {
printf("%s |", flag);
for (int32_t k = 0; k < numOfCols; k++) {
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index ce09b83fae..0cc4e31aed 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -124,9 +124,6 @@ int32_t tsMinIntervalTime = 1;
int32_t tsQueryBufferSize = -1;
int64_t tsQueryBufferSizeBytes = -1;
-// in retrieve blocking model, the retrieve threads will wait for the completion of the query processing.
-bool tsRetrieveBlockingModel = false;
-
// tsdb config
// For backward compatibility
bool tsdbForceKeepFile = false;
@@ -296,6 +293,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "idxDebugFlag", idxDebugFlag, 0, 255, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "tdbDebugFlag", tdbDebugFlag, 0, 255, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "metaDebugFlag", metaDebugFlag, 0, 255, 0) != 0) return -1;
return 0;
}
@@ -362,7 +360,6 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, 0) != 0) return -1;
- if (cfgAddBool(pCfg, "retrieveBlockingModel", tsRetrieveBlockingModel, 0) != 0) return -1;
if (cfgAddBool(pCfg, "printAuth", tsPrintAuth, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "multiProcess", tsMultiProcess, 0, 2, 0) != 0) return -1;
@@ -476,6 +473,7 @@ static void taosSetServerLogCfg(SConfig *pCfg) {
smaDebugFlag = cfgGetItem(pCfg, "smaDebugFlag")->i32;
idxDebugFlag = cfgGetItem(pCfg, "idxDebugFlag")->i32;
tdbDebugFlag = cfgGetItem(pCfg, "tdbDebugFlag")->i32;
+ metaDebugFlag = cfgGetItem(pCfg, "metaDebugFlag")->i32;
}
static int32_t taosSetClientCfg(SConfig *pCfg) {
@@ -547,7 +545,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
tsCountAlwaysReturnValue = cfgGetItem(pCfg, "countAlwaysReturnValue")->i32;
tsQueryBufferSize = cfgGetItem(pCfg, "queryBufferSize")->i32;
- tsRetrieveBlockingModel = cfgGetItem(pCfg, "retrieveBlockingModel")->bval;
tsPrintAuth = cfgGetItem(pCfg, "printAuth")->bval;
tsMultiProcess = cfgGetItem(pCfg, "multiProcess")->bval;
@@ -832,9 +829,7 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
break;
}
case 'r': {
- if (strcasecmp("retrieveBlockingModel", name) == 0) {
- tsRetrieveBlockingModel = cfgGetItem(pCfg, "retrieveBlockingModel")->bval;
- } else if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) {
+ if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) {
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;
} else if (strcasecmp("rpcDebugFlag", name) == 0) {
rpcDebugFlag = cfgGetItem(pCfg, "rpcDebugFlag")->i32;
@@ -1100,12 +1095,12 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
const char *options[] = {
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag",
"tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag",
- "tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag",
+ "tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag",
};
int32_t *optionVars[] = {
&dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag,
&tqDebugFlag, &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag,
- &tmrDebugFlag, &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag,
+ &tmrDebugFlag, &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag,
};
int32_t optionSize = tListLen(options);
@@ -1152,5 +1147,6 @@ void taosSetAllDebugFlag(int32_t flag) {
taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag);
+ taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag);
uInfo("all debug flag are set to %d", flag);
}
diff --git a/source/common/src/tmisce.c b/source/common/src/tmisce.c
new file mode 100644
index 0000000000..2290c5d45f
--- /dev/null
+++ b/source/common/src/tmisce.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#define _DEFAULT_SOURCE
+#include "tdatablock.h"
+#include "tglobal.h"
+#include "tlog.h"
+#include "tname.h"
+
+int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp) {
+ pEp->port = 0;
+ strcpy(pEp->fqdn, ep);
+
+ char* temp = strchr(pEp->fqdn, ':');
+ if (temp) {
+ *temp = 0;
+ pEp->port = atoi(temp + 1);
+ }
+
+ if (pEp->port == 0) {
+ pEp->port = tsServerPort;
+ }
+
+ return 0;
+}
+
+void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port) {
+ if (pEpSet == NULL || fqdn == NULL || strlen(fqdn) == 0) {
+ return;
+ }
+
+ int32_t index = pEpSet->numOfEps;
+ tstrncpy(pEpSet->eps[index].fqdn, fqdn, tListLen(pEpSet->eps[index].fqdn));
+ pEpSet->eps[index].port = port;
+ pEpSet->numOfEps += 1;
+}
+
+bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2) {
+ if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) {
+ return false;
+ }
+
+ for (int32_t i = 0; i < s1->numOfEps; i++) {
+ if (s1->eps[i].port != s2->eps[i].port || strncmp(s1->eps[i].fqdn, s2->eps[i].fqdn, TSDB_FQDN_LEN) != 0)
+ return false;
+ }
+ return true;
+}
+
+void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet) {
+ taosCorBeginWrite(&pEpSet->version);
+ pEpSet->epSet = *pNewEpSet;
+ taosCorEndWrite(&pEpSet->version);
+}
+
+SEpSet getEpSet_s(SCorEpSet* pEpSet) {
+ SEpSet ep = {0};
+ taosCorBeginRead(&pEpSet->version);
+ ep = pEpSet->epSet;
+ taosCorEndRead(&pEpSet->version);
+
+ return ep;
+}
+
+
diff --git a/source/common/src/tname.c b/source/common/src/tname.c
index c5bebf3630..b6f49a7219 100644
--- a/source/common/src/tname.c
+++ b/source/common/src/tname.c
@@ -20,34 +20,6 @@
#define VALID_NAME_TYPE(x) ((x) == TSDB_DB_NAME_T || (x) == TSDB_TABLE_NAME_T)
-bool tscValidateTableNameLength(size_t len) { return len < TSDB_TABLE_NAME_LEN; }
-
-#if 0
-// TODO refactor
-SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) {
- if (numOfFilters == 0 || src == NULL) {
- assert(src == NULL);
- return NULL;
- }
-
- SColumnFilterInfo* pFilter = taosMemoryCalloc(1, numOfFilters * sizeof(SColumnFilterInfo));
-
- memcpy(pFilter, src, sizeof(SColumnFilterInfo) * numOfFilters);
- for (int32_t j = 0; j < numOfFilters; ++j) {
- if (pFilter[j].filterstr) {
- size_t len = (size_t) pFilter[j].len + 1 * TSDB_NCHAR_SIZE;
- pFilter[j].pz = (int64_t) taosMemoryCalloc(1, len);
-
- memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t) pFilter[j].len);
- }
- }
-
- assert(src->filterstr == 0 || src->filterstr == 1);
- assert(!(src->lowerRelOptr == 0 && src->upperRelOptr == 0));
-
- return pFilter;
-}
-#endif
#if 0
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c
index 013cc05c65..34c3b40556 100644
--- a/source/dnode/mgmt/exe/dmMain.c
+++ b/source/dnode/mgmt/exe/dmMain.c
@@ -27,6 +27,9 @@
#define DM_VERSION "Print program version."
#define DM_EMAIL ""
static struct {
+#ifdef WINDOWS
+ bool winServiceMode;
+#endif
bool dumpConfig;
bool generateGrant;
bool printAuth;
@@ -93,6 +96,10 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
global.dumpConfig = true;
} else if (strcmp(argv[i], "-V") == 0) {
global.printVersion = true;
+ #ifdef WINDOWS
+ } else if (strcmp(argv[i], "--win_service") == 0) {
+ global.winServiceMode = true;
+ #endif
} else if (strcmp(argv[i], "-e") == 0) {
global.envCmd[cmdEnvIndex] = argv[++i];
cmdEnvIndex++;
@@ -169,6 +176,18 @@ int main(int argc, char const *argv[]) {
return -1;
}
+#ifdef WINDOWS
+ int mainWindows(int argc,char** argv);
+ if (global.winServiceMode) {
+ stratWindowsService(mainWindows);
+ } else {
+ return mainWindows(argc, argv);
+ }
+ return 0;
+}
+int mainWindows(int argc,char** argv) {
+#endif
+
if (global.generateGrant) {
dmGenerateGrant();
taosCleanupArgs();
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index 3f90f087fd..fc5e20ef28 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -788,9 +788,9 @@ _OVER:
static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
const char *options[] = {
- "debugFlag", "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag",
- "tsdbDebugFlag", "tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag",
- "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag",
+ "debugFlag", "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag",
+ "tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag",
+ "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag",
};
int32_t optionSize = tListLen(options);
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index e9e20912c5..3d8d46a0fb 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -31,6 +31,7 @@ target_sources(
"src/sma/smaOpen.c"
"src/sma/smaCommit.c"
"src/sma/smaRollup.c"
+ "src/sma/smaSnapshot.c"
"src/sma/smaTimeRange.c"
# tsdb
diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h
index 217d40e3aa..c825ab6731 100644
--- a/source/dnode/vnode/src/inc/sma.h
+++ b/source/dnode/vnode/src/inc/sma.h
@@ -209,6 +209,9 @@ int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen,
// smaFileUtil ================
+typedef struct SQTaskFReader SQTaskFReader;
+typedef struct SQTaskFWriter SQTaskFWriter;
+
#define TD_FILE_HEAD_SIZE 512
typedef struct STFInfo STFInfo;
diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h
index d8c84e952b..fdf970611c 100644
--- a/source/dnode/vnode/src/inc/tsdb.h
+++ b/source/dnode/vnode/src/inc/tsdb.h
@@ -97,7 +97,6 @@ int32_t tRowMergerGetRow(SRowMerger *pMerger, STSRow **ppRow);
// TABLEID
int32_t tTABLEIDCmprFn(const void *p1, const void *p2);
// TSDBKEY
-int32_t tsdbKeyCmprFn(const void *p1, const void *p2);
#define MIN_TSDBKEY(KEY1, KEY2) ((tsdbKeyCmprFn(&(KEY1), &(KEY2)) < 0) ? (KEY1) : (KEY2))
#define MAX_TSDBKEY(KEY1, KEY2) ((tsdbKeyCmprFn(&(KEY1), &(KEY2)) > 0) ? (KEY1) : (KEY2))
// SBlockCol
@@ -558,6 +557,26 @@ struct STsdbReadSnap {
STsdbFS fs;
};
+// ========== inline functions ==========
+static FORCE_INLINE int32_t tsdbKeyCmprFn(const void *p1, const void *p2) {
+ TSDBKEY *pKey1 = (TSDBKEY *)p1;
+ TSDBKEY *pKey2 = (TSDBKEY *)p2;
+
+ if (pKey1->ts < pKey2->ts) {
+ return -1;
+ } else if (pKey1->ts > pKey2->ts) {
+ return 1;
+ }
+
+ if (pKey1->version < pKey2->version) {
+ return -1;
+ } else if (pKey1->version > pKey2->version) {
+ return 1;
+ }
+
+ return 0;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index 9ed2b25fdf..b1da5a7883 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -62,6 +62,8 @@ typedef struct SMetaSnapReader SMetaSnapReader;
typedef struct SMetaSnapWriter SMetaSnapWriter;
typedef struct STsdbSnapReader STsdbSnapReader;
typedef struct STsdbSnapWriter STsdbSnapWriter;
+typedef struct SRsmaSnapReader SRsmaSnapReader;
+typedef struct SRsmaSnapWriter SRsmaSnapWriter;
typedef struct SSnapDataHdr SSnapDataHdr;
#define VNODE_META_DIR "meta"
@@ -196,13 +198,21 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr
int32_t metaSnapWrite(SMetaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t metaSnapWriterClose(SMetaSnapWriter** ppWriter, int8_t rollback);
// STsdbSnapReader ========================================
-int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapReader** ppReader);
+int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, STsdbSnapReader** ppReader);
int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader);
int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData);
// STsdbSnapWriter ========================================
int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWriter** ppWriter);
int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback);
+// SRsmaSnapReader ========================================
+int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapReader** ppReader);
+int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader);
+int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData);
+// SRsmaSnapWriter ========================================
+int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter);
+int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
+int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback);
typedef struct {
int8_t streamType; // sma or other
@@ -314,6 +324,15 @@ struct SSma {
// sma
void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data);
+enum {
+ SNAP_DATA_META = 0,
+ SNAP_DATA_TSDB = 1,
+ SNAP_DATA_DEL = 2,
+ SNAP_DATA_RSMA1 = 3,
+ SNAP_DATA_RSMA2 = 4,
+ SNAP_DATA_QTASK = 5,
+};
+
struct SSnapDataHdr {
int8_t type;
int64_t index;
diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c
index 195723562c..396a58c988 100644
--- a/source/dnode/vnode/src/meta/metaOpen.c
+++ b/source/dnode/vnode/src/meta/metaOpen.c
@@ -183,11 +183,11 @@ int metaClose(SMeta *pMeta) {
int32_t metaRLock(SMeta *pMeta) {
int32_t ret = 0;
- metaDebug("meta rlock %p B", &pMeta->lock);
+ metaTrace("meta rlock %p B", &pMeta->lock);
ret = taosThreadRwlockRdlock(&pMeta->lock);
- metaDebug("meta rlock %p E", &pMeta->lock);
+ metaTrace("meta rlock %p E", &pMeta->lock);
return ret;
}
@@ -195,11 +195,11 @@ int32_t metaRLock(SMeta *pMeta) {
int32_t metaWLock(SMeta *pMeta) {
int32_t ret = 0;
- metaDebug("meta wlock %p B", &pMeta->lock);
+ metaTrace("meta wlock %p B", &pMeta->lock);
ret = taosThreadRwlockWrlock(&pMeta->lock);
- metaDebug("meta wlock %p E", &pMeta->lock);
+ metaTrace("meta wlock %p E", &pMeta->lock);
return ret;
}
@@ -207,11 +207,11 @@ int32_t metaWLock(SMeta *pMeta) {
int32_t metaULock(SMeta *pMeta) {
int32_t ret = 0;
- metaDebug("meta ulock %p B", &pMeta->lock);
+ metaTrace("meta ulock %p B", &pMeta->lock);
ret = taosThreadRwlockUnlock(&pMeta->lock);
- metaDebug("meta ulock %p E", &pMeta->lock);
+ metaTrace("meta ulock %p E", &pMeta->lock);
return ret;
}
diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c
index 85261d302e..bb20a1a7ff 100644
--- a/source/dnode/vnode/src/meta/metaSnapshot.c
+++ b/source/dnode/vnode/src/meta/metaSnapshot.c
@@ -109,7 +109,7 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) {
}
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
- pHdr->type = 0; // TODO: use macro
+ pHdr->type = SNAP_DATA_META;
pHdr->size = nData;
memcpy(pHdr->data, pData, nData);
@@ -145,6 +145,8 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr
pWriter->sver = sver;
pWriter->ever = ever;
+ metaBegin(pMeta);
+
*ppWriter = pWriter;
return code;
diff --git a/source/dnode/vnode/src/sma/smaSnapshot.c b/source/dnode/vnode/src/sma/smaSnapshot.c
index 21dfd8a32d..c5cb816887 100644
--- a/source/dnode/vnode/src/sma/smaSnapshot.c
+++ b/source/dnode/vnode/src/sma/smaSnapshot.c
@@ -49,7 +49,8 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapRead
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pSma->pRSmaTsdb[i]) {
- code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, &pReader->pDataReader[i]);
+ code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, i == 0 ? SNAP_DATA_RSMA1 : SNAP_DATA_RSMA2,
+ &pReader->pDataReader[i]);
if (code < 0) {
goto _err;
}
@@ -221,10 +222,9 @@ int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
}
}
+ smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma));
taosMemoryFree(pWriter);
*ppWriter = NULL;
-
- smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma));
return code;
_err:
@@ -245,15 +245,17 @@ int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
code = tsdbSnapWrite(pWriter->pDataWriter[1], pData, nData);
} else if (pHdr->type == SNAP_DATA_QTASK) {
code = rsmaSnapWriteQTaskInfo(pWriter, pData, nData);
+ } else {
+ ASSERT(0);
}
if (code < 0) goto _err;
_exit:
- smaInfo("vgId:%d rsma snapshot write for data %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type);
+ smaInfo("vgId:%d rsma snapshot write for data type %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type);
return code;
_err:
- smaError("vgId:%d rsma snapshot write for data %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type,
+ smaError("vgId:%d rsma snapshot write for data type %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type,
tstrerror(code));
return code;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index 4e6a450d35..f03b02af27 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -175,7 +175,7 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST
cacheRow = (STSRow *)taosLRUCacheValue(pCache, h);
if (row->ts >= cacheRow->ts) {
if (row->ts == cacheRow->ts) {
- STSRow *mergedRow;
+ STSRow *mergedRow = NULL;
SRowMerger merger = {0};
STSchema *pTSchema = metaGetTbTSchema(pTsdb->pVnode->pMeta, uid, -1);
diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c
index 194bd2e924..24f066f703 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCommit.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c
@@ -307,7 +307,11 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0};
fSma = *pRSet->pSmaF;
} else {
- wSet.diskId = (SDiskID){.level = 0, .id = 0};
+ SDiskID did = {0};
+
+ tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did);
+
+ wSet.diskId = did;
wSet.fid = pCommitter->commitFid;
fHead = (SHeadFile){.commitID = pCommitter->commitID, .offset = 0, .size = 0};
fData = (SDataFile){.commitID = pCommitter->commitID, .size = 0};
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 03985654f8..072d15d715 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -145,7 +145,8 @@ static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanI
SRowMerger* pMerger);
static int32_t doMergeRowsInBuf(SIterInfo* pIter, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader);
-static int32_t doAppendOneRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow);
+static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow);
+static int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex);
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
static void updateSchema(TSDBROW* pRow, uint64_t uid, STsdbReader* pReader);
static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);
@@ -691,16 +692,13 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
SBlock* pBlock = getCurrentBlock(pBlockIter);
SSDataBlock* pResBlock = pReader->pResBlock;
- int32_t numOfCols = blockDataGetNumOfCols(pResBlock);
+ int32_t numOfOutputCols = blockDataGetNumOfCols(pResBlock);
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- int64_t st = taosGetTimestampUs();
-
SColVal cv = {0};
- int32_t colIndex = 0;
-
+ int64_t st = taosGetTimestampUs();
bool asc = ASCENDING_TRAVERSE(pReader->order);
int32_t step = asc ? 1 : -1;
@@ -724,7 +722,9 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
i += 1;
}
- while (i < numOfCols && colIndex < taosArrayGetSize(pBlockData->aIdx)) {
+ int32_t colIndex = 0;
+ int32_t num = taosArrayGetSize(pBlockData->aIdx);
+ while (i < numOfOutputCols && colIndex < num) {
rowIndex = 0;
pColData = taosArrayGet(pResBlock->pDataBlock, i);
@@ -744,7 +744,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
i += 1;
}
- while (i < numOfCols) {
+ while (i < numOfOutputCols) {
pColData = taosArrayGet(pResBlock->pDataBlock, i);
colDataAppendNNULL(pColData, 0, remain);
i += 1;
@@ -1256,7 +1256,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
}
tRowMergerClear(&merge);
- doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
taosMemoryFree(pTSRow);
return TSDB_CODE_SUCCESS;
@@ -1300,7 +1300,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
}
tRowMergerGetRow(&merge, &pTSRow);
- doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
return TSDB_CODE_SUCCESS;
} else { // key > ik.ts || key > k.ts
ASSERT(key != ik.ts);
@@ -1309,7 +1309,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
// [4] ik.ts < k.ts <= key
if (ik.ts < k.ts) {
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
- doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
return TSDB_CODE_SUCCESS;
}
@@ -1317,7 +1317,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
// [6] k.ts < ik.ts <= key
if (k.ts < ik.ts) {
doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader);
- doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
return TSDB_CODE_SUCCESS;
}
@@ -1326,7 +1326,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
ASSERT(key > ik.ts && key > k.ts);
doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, &pTSRow);
- doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
return TSDB_CODE_SUCCESS;
}
}
@@ -1350,7 +1350,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
}
tRowMergerGetRow(&merge, &pTSRow);
- doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
return TSDB_CODE_SUCCESS;
} else {
ASSERT(ik.ts != k.ts); // this case has been included in the previous if branch
@@ -1359,7 +1359,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
// [4] ik.ts > key >= k.ts
if (ik.ts > key) {
doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader);
- doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
return TSDB_CODE_SUCCESS;
}
@@ -1371,7 +1371,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
- doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
return TSDB_CODE_SUCCESS;
}
@@ -1383,7 +1383,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
tRowMerge(&merge, &fRow);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
- doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
return TSDB_CODE_SUCCESS;
}
}
@@ -1438,6 +1438,21 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
}
// imem & mem are all empty, only file exist
+
+ // opt version
+ // 1. it is not a border point
+ // 2. the direct next point is not an duplicated timestamp
+ if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && pReader->order == TSDB_ORDER_ASC) ||
+ (pDumpInfo->rowIndex > 0 && pReader->order == TSDB_ORDER_DESC)) {
+ int32_t step = pReader->order == TSDB_ORDER_ASC? 1:-1;
+ int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
+ if (nextKey != key) { // merge is not needed
+ doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
+ pDumpInfo->rowIndex += step;
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
STSRow* pTSRow = NULL;
@@ -1446,7 +1461,7 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
tRowMergerGetRow(&merge, &pTSRow);
- doAppendOneRow(pReader->pResBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
@@ -2201,7 +2216,7 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
int32_t step = asc ? 1 : -1;
pDumpInfo->rowIndex += step;
- if (pDumpInfo->rowIndex <= pBlockData->nRow - 1) {
+ if ((pDumpInfo->rowIndex <= pBlockData->nRow - 1 && asc) ||(pDumpInfo->rowIndex >= 0 && !asc)) {
pDumpInfo->rowIndex =
doMergeRowsInFileBlockImpl(pBlockData, pDumpInfo->rowIndex, key, pMerger, &pReader->verRange, step);
}
@@ -2325,7 +2340,7 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
return TSDB_CODE_SUCCESS;
}
-int32_t doAppendOneRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow) {
+int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow) {
int32_t numOfRows = pBlock->info.rows;
int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock);
@@ -2369,6 +2384,47 @@ int32_t doAppendOneRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow
return TSDB_CODE_SUCCESS;
}
+int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex) {
+ int32_t i = 0, j = 0;
+ int32_t outputRowIndex = pResBlock->info.rows;
+
+ SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
+
+ SColumnInfoData* pColData = taosArrayGet(pResBlock->pDataBlock, i);
+ if (pColData->info.colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
+ colDataAppendInt64(pColData, outputRowIndex, &pBlockData->aTSKEY[rowIndex]);
+ i += 1;
+ }
+
+ SColVal cv = {0};
+ int32_t numOfInputCols = taosArrayGetSize(pBlockData->aIdx);
+ int32_t numOfOutputCols = blockDataGetNumOfCols(pResBlock);
+
+ while(i < numOfOutputCols && j < numOfInputCols) {
+ SColumnInfoData* pCol = taosArrayGet(pResBlock->pDataBlock, i);
+ SColData* pData = tBlockDataGetColDataByIdx(pBlockData, j);
+
+ if (pData->cid == pCol->info.colId) {
+ tColDataGetValue(pData, rowIndex, &cv);
+ doCopyColVal(pCol, outputRowIndex, i, &cv, pSupInfo);
+ j += 1;
+ } else { // the specified column does not exist in file block, fill with null data
+ colDataAppendNULL(pCol, outputRowIndex);
+ }
+
+ i += 1;
+ }
+
+ while (i < numOfOutputCols) {
+ SColumnInfoData* pCol = taosArrayGet(pResBlock->pDataBlock, i);
+ colDataAppendNULL(pCol, outputRowIndex);
+ i += 1;
+ }
+
+ pResBlock->info.rows += 1;
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t endKey, int32_t capacity,
STsdbReader* pReader) {
SSDataBlock* pBlock = pReader->pResBlock;
@@ -2380,7 +2436,7 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
break;
}
- doAppendOneRow(pBlock, pReader, pTSRow);
+ doAppendRowFromTSRow(pBlock, pReader, pTSRow);
taosMemoryFree(pTSRow);
// no data in buffer, return immediately
diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
index 51d7edcf71..6bb2b8c253 100644
--- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
+++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
@@ -21,6 +21,7 @@ struct STsdbSnapReader {
int64_t sver;
int64_t ever;
STsdbFS fs;
+ int8_t type;
// for data file
int8_t dataDone;
int32_t fid;
@@ -62,7 +63,8 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
pReader->iBlockIdx = 0;
pReader->pBlockIdx = NULL;
- tsdbInfo("vgId:%d vnode snapshot tsdb open data file to read, fid:%d", TD_VID(pTsdb->pVnode), pReader->fid);
+ tsdbInfo("vgId:%d vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pTsdb->pVnode), pTsdb->path,
+ pReader->fid);
}
while (true) {
@@ -130,7 +132,7 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
}
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
- pHdr->type = 1;
+ pHdr->type = pReader->type;
pHdr->size = size;
TABLEID* pId = (TABLEID*)(&pHdr[1]);
@@ -139,9 +141,9 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
tPutBlockData((uint8_t*)(&pId[1]), &pReader->nBlockData);
- tsdbInfo("vgId:%d vnode snapshot read data, fid:%d suid:%" PRId64 " uid:%" PRId64
+ tsdbInfo("vgId:%d vnode snapshot read data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64
" iBlock:%d minVersion:%d maxVersion:%d nRow:%d out of %d size:%d",
- TD_VID(pTsdb->pVnode), pReader->fid, pReader->pBlockIdx->suid, pReader->pBlockIdx->uid,
+ TD_VID(pTsdb->pVnode), pTsdb->path, pReader->fid, pReader->pBlockIdx->suid, pReader->pBlockIdx->uid,
pReader->iBlock - 1, pBlock->minVersion, pBlock->maxVersion, pReader->nBlockData.nRow, pBlock->nRow,
size);
@@ -154,7 +156,8 @@ _exit:
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb read data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb read data for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
+ tstrerror(code));
return code;
}
@@ -212,7 +215,7 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) {
}
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
- pHdr->type = 2;
+ pHdr->type = SNAP_DATA_DEL;
pHdr->size = size;
TABLEID* pId = (TABLEID*)(&pHdr[1]);
@@ -228,8 +231,8 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) {
n += tPutDelData((*ppData) + n, pDelData);
}
- tsdbInfo("vgId:%d vnode snapshot tsdb read del data, suid:%" PRId64 " uid:%d" PRId64 " size:%d",
- TD_VID(pTsdb->pVnode), pDelIdx->suid, pDelIdx->uid, size);
+ tsdbInfo("vgId:%d vnode snapshot tsdb read del data for %s, suid:%" PRId64 " uid:%" PRId64 " size:%d",
+ TD_VID(pTsdb->pVnode), pTsdb->path, pDelIdx->suid, pDelIdx->uid, size);
break;
}
@@ -238,11 +241,12 @@ _exit:
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb read del failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb read del for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
+ tstrerror(code));
return code;
}
-int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapReader** ppReader) {
+int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, STsdbSnapReader** ppReader) {
int32_t code = 0;
STsdbSnapReader* pReader = NULL;
@@ -255,6 +259,7 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapRe
pReader->pTsdb = pTsdb;
pReader->sver = sver;
pReader->ever = ever;
+ pReader->type = type;
code = taosThreadRwlockRdlock(&pTsdb->rwLock);
if (code) {
@@ -297,12 +302,13 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapRe
goto _err;
}
- tsdbInfo("vgId:%d vnode snapshot tsdb reader opened", TD_VID(pTsdb->pVnode));
+ tsdbInfo("vgId:%d vnode snapshot tsdb reader opened for %s", TD_VID(pTsdb->pVnode), pTsdb->path);
*ppReader = pReader;
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb reader open for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
+ tstrerror(code));
*ppReader = NULL;
return code;
}
@@ -327,7 +333,7 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) {
tsdbFSUnref(pReader->pTsdb, &pReader->fs);
- tsdbInfo("vgId:%d vnode snapshot tsdb reader closed", TD_VID(pReader->pTsdb->pVnode));
+ tsdbInfo("vgId:%d vnode snapshot tsdb reader closed for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path);
taosMemoryFree(pReader);
*ppReader = NULL;
@@ -368,10 +374,12 @@ int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData) {
}
_exit:
+ tsdbDebug("vgId:%d vnode snapshot tsdb read for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path);
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb read failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb read for %s failed since %s", TD_VID(pReader->pTsdb->pVnode),
+ pReader->pTsdb->path, tstrerror(code));
return code;
}
@@ -436,7 +444,8 @@ static int32_t tsdbSnapWriteAppendData(STsdbSnapWriter* pWriter, uint8_t* pData,
return code;
_err:
- tsdbError("vgId:%d tsdb snapshot write append data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb snapshot write append data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
+ pWriter->pTsdb->path, tstrerror(code));
return code;
}
@@ -522,9 +531,12 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
}
_exit:
+ tsdbInfo("vgId:%d tsdb snapshot write table data end for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
return code;
_err:
+ tsdbError("vgId:%d tsdb snapshot write table data end for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
+ pWriter->pTsdb->path, tstrerror(code));
return code;
}
@@ -570,6 +582,8 @@ _exit:
return code;
_err:
+ tsdbError("vgId:%d tsdb snapshot move write table data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
+ pWriter->pTsdb->path, tstrerror(code));
return code;
}
@@ -708,8 +722,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb write table data impl failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb write table data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
+ pWriter->pTsdb->path, tstrerror(code));
return code;
}
@@ -794,11 +808,12 @@ static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) {
if (code) goto _err;
_exit:
+ tsdbDebug("vgId:%d vnode snapshot tsdb write data impl for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb write data impl failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb write data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
+ pWriter->pTsdb->path, tstrerror(code));
return code;
}
@@ -833,11 +848,12 @@ static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) {
}
_exit:
- tsdbInfo("vgId:%d vnode snapshot tsdb writer data end", TD_VID(pTsdb->pVnode));
+ tsdbInfo("vgId:%d vnode snapshot tsdb writer data end for %s", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb writer data end failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb writer data end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
+ tstrerror(code));
return code;
}
@@ -920,12 +936,13 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3
code = tsdbSnapWriteTableData(pWriter, id);
if (code) goto _err;
- tsdbInfo("vgId:%d vnode snapshot tsdb write data, fid:%d suid:%" PRId64 " uid:%" PRId64 " nRow:%d",
- TD_VID(pTsdb->pVnode), fid, id.suid, id.suid, pBlockData->nRow);
+ tsdbInfo("vgId:%d vnode snapshot tsdb write data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64 " nRow:%d",
+ TD_VID(pTsdb->pVnode), pTsdb->path, fid, id.suid, id.uid, pBlockData->nRow);
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb write data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb write data for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
+ tstrerror(code));
return code;
}
@@ -1015,7 +1032,8 @@ _exit:
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb write del failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb write del for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
+ tstrerror(code));
return code;
}
@@ -1056,11 +1074,12 @@ static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) {
}
_exit:
- tsdbInfo("vgId:%d vnode snapshot tsdb write del end", TD_VID(pTsdb->pVnode));
+ tsdbInfo("vgId:%d vnode snapshot tsdb write del end for %s", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb write del end failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb write del end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
+ tstrerror(code));
return code;
}
@@ -1127,10 +1146,12 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr
}
*ppWriter = pWriter;
- return code;
+ tsdbInfo("vgId:%d tsdb snapshot writer open for %s succeed", TD_VID(pTsdb->pVnode), pTsdb->path);
+ return code;
_err:
- tsdbError("vgId:%d tsdb snapshot writer open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb snapshot writer open for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
+ tstrerror(code));
*ppWriter = NULL;
return code;
}
@@ -1157,14 +1178,16 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) {
if (code) goto _err;
}
+ tsdbInfo("vgId:%d vnode snapshot tsdb writer close for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
taosMemoryFree(pWriter);
*ppWriter = NULL;
-
return code;
_err:
- tsdbError("vgId:%d vnode snapshot tsdb writer close failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb writer close for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
+ pWriter->pTsdb->path, tstrerror(code));
+ taosMemoryFree(pWriter);
+ *ppWriter = NULL;
return code;
}
@@ -1173,7 +1196,7 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
// ts data
- if (pHdr->type == 1) {
+ if (pHdr->type == SNAP_DATA_TSDB) {
code = tsdbSnapWriteData(pWriter, pData, nData);
if (code) goto _err;
@@ -1186,15 +1209,17 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
}
// del data
- if (pHdr->type == 2) {
+ if (pHdr->type == SNAP_DATA_DEL) {
code = tsdbSnapWriteDel(pWriter, pData, nData);
if (code) goto _err;
}
_exit:
+ tsdbDebug("vgId:%d tsdb snapshot write for %s succeed", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
return code;
_err:
- tsdbError("vgId:%d tsdb snapshow write failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb snapshot write for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path,
+ tstrerror(code));
return code;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c
index 3e05b75dd0..8926e5e3c6 100644
--- a/source/dnode/vnode/src/tsdb/tsdbUtil.c
+++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c
@@ -151,26 +151,6 @@ int32_t tTABLEIDCmprFn(const void *p1, const void *p2) {
return 0;
}
-// TSDBKEY =======================================================================
-int32_t tsdbKeyCmprFn(const void *p1, const void *p2) {
- TSDBKEY *pKey1 = (TSDBKEY *)p1;
- TSDBKEY *pKey2 = (TSDBKEY *)p2;
-
- if (pKey1->ts < pKey2->ts) {
- return -1;
- } else if (pKey1->ts > pKey2->ts) {
- return 1;
- }
-
- if (pKey1->version < pKey2->version) {
- return -1;
- } else if (pKey1->version > pKey2->version) {
- return 1;
- }
-
- return 0;
-}
-
// TSDBKEY ======================================================
static FORCE_INLINE int32_t tPutTSDBKEY(uint8_t *p, TSDBKEY *pKey) {
int32_t n = 0;
@@ -1401,7 +1381,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
break;
case TSDB_DATA_TYPE_BOOL:
break;
- case TSDB_DATA_TYPE_TINYINT:{
+ case TSDB_DATA_TYPE_TINYINT: {
pColAgg->sum += colVal.value.i8;
if (pColAgg->min > colVal.value.i8) {
pColAgg->min = colVal.value.i8;
@@ -1411,7 +1391,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
- case TSDB_DATA_TYPE_SMALLINT:{
+ case TSDB_DATA_TYPE_SMALLINT: {
pColAgg->sum += colVal.value.i16;
if (pColAgg->min > colVal.value.i16) {
pColAgg->min = colVal.value.i16;
@@ -1441,7 +1421,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
- case TSDB_DATA_TYPE_FLOAT:{
+ case TSDB_DATA_TYPE_FLOAT: {
pColAgg->sum += colVal.value.f;
if (pColAgg->min > colVal.value.f) {
pColAgg->min = colVal.value.f;
@@ -1451,7 +1431,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
- case TSDB_DATA_TYPE_DOUBLE:{
+ case TSDB_DATA_TYPE_DOUBLE: {
pColAgg->sum += colVal.value.d;
if (pColAgg->min > colVal.value.d) {
pColAgg->min = colVal.value.d;
@@ -1463,7 +1443,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
case TSDB_DATA_TYPE_VARCHAR:
break;
- case TSDB_DATA_TYPE_TIMESTAMP:{
+ case TSDB_DATA_TYPE_TIMESTAMP: {
if (pColAgg->min > colVal.value.i64) {
pColAgg->min = colVal.value.i64;
}
@@ -1474,7 +1454,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
case TSDB_DATA_TYPE_NCHAR:
break;
- case TSDB_DATA_TYPE_UTINYINT:{
+ case TSDB_DATA_TYPE_UTINYINT: {
pColAgg->sum += colVal.value.u8;
if (pColAgg->min > colVal.value.u8) {
pColAgg->min = colVal.value.u8;
@@ -1484,7 +1464,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
- case TSDB_DATA_TYPE_USMALLINT:{
+ case TSDB_DATA_TYPE_USMALLINT: {
pColAgg->sum += colVal.value.u16;
if (pColAgg->min > colVal.value.u16) {
pColAgg->min = colVal.value.u16;
@@ -1494,7 +1474,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
- case TSDB_DATA_TYPE_UINT:{
+ case TSDB_DATA_TYPE_UINT: {
pColAgg->sum += colVal.value.u32;
if (pColAgg->min > colVal.value.u32) {
pColAgg->min = colVal.value.u32;
@@ -1504,7 +1484,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
break;
}
- case TSDB_DATA_TYPE_UBIGINT:{
+ case TSDB_DATA_TYPE_UBIGINT: {
pColAgg->sum += colVal.value.u64;
if (pColAgg->min > colVal.value.u64) {
pColAgg->min = colVal.value.u64;
diff --git a/source/dnode/vnode/src/vnd/vnodeSnapshot.c b/source/dnode/vnode/src/vnd/vnodeSnapshot.c
index 3f8f81cb09..15cc6a7197 100644
--- a/source/dnode/vnode/src/vnd/vnodeSnapshot.c
+++ b/source/dnode/vnode/src/vnd/vnodeSnapshot.c
@@ -28,7 +28,8 @@ struct SVSnapReader {
int8_t tsdbDone;
STsdbSnapReader *pTsdbReader;
// rsma
- int8_t rsmaDone[TSDB_RETENTION_L2];
+ int8_t rsmaDone;
+ SRsmaSnapReader *pRsmaReader;
};
int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader) {
@@ -57,6 +58,10 @@ _err:
int32_t vnodeSnapReaderClose(SVSnapReader *pReader) {
int32_t code = 0;
+ if (pReader->pRsmaReader) {
+ rsmaSnapReaderClose(&pReader->pRsmaReader);
+ }
+
if (pReader->pTsdbReader) {
tsdbSnapReaderClose(&pReader->pTsdbReader);
}
@@ -99,7 +104,7 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData)
if (!pReader->tsdbDone) {
// open if not
if (pReader->pTsdbReader == NULL) {
- code = tsdbSnapReaderOpen(pReader->pVnode->pTsdb, pReader->sver, pReader->ever, &pReader->pTsdbReader);
+ code = tsdbSnapReaderOpen(pReader->pVnode->pTsdb, pReader->sver, pReader->ever, SNAP_DATA_TSDB, &pReader->pTsdbReader);
if (code) goto _err;
}
@@ -118,40 +123,26 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData)
}
// RSMA ==============
-#if 0
- if (VND_IS_RSMA(pReader->pVnode)) {
- // RSMA1/RSMA2
- for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
- if (!pReader->rsmaDone[i]) {
- if (!pReader->pVnode->pSma->pRSmaTsdb[i]) {
- // no valid tsdb
- pReader->rsmaDone[i] = 1;
- continue;
- }
- if (pReader->pTsdbReader == NULL) {
- code = tsdbSnapReaderOpen(pReader->pVnode->pSma->pRSmaTsdb[i], pReader->sver, pReader->ever,
- &pReader->pTsdbReader);
- if (code) goto _err;
- }
+ if (VND_IS_RSMA(pReader->pVnode) && !pReader->rsmaDone) {
+ // open if not
+ if (pReader->pRsmaReader == NULL) {
+ code = rsmaSnapReaderOpen(pReader->pVnode->pSma, pReader->sver, pReader->ever, &pReader->pRsmaReader);
+ if (code) goto _err;
+ }
- code = tsdbSnapRead(pReader->pTsdbReader, ppData);
- if (code) {
- goto _err;
- } else {
- if (*ppData) {
- goto _exit;
- } else {
- pReader->tsdbDone = 1;
- code = tsdbSnapReaderClose(&pReader->pTsdbReader);
- if (code) goto _err;
- }
- }
+ code = rsmaSnapRead(pReader->pRsmaReader, ppData);
+ if (code) {
+ goto _err;
+ } else {
+ if (*ppData) {
+ goto _exit;
+ } else {
+ pReader->rsmaDone = 1;
+ code = rsmaSnapReaderClose(&pReader->pRsmaReader);
+ if (code) goto _err;
}
}
- // QTaskInfoFile
- // TODO ...
}
-#endif
*ppData = NULL;
*nData = 0;
@@ -186,6 +177,8 @@ struct SVSnapWriter {
SMetaSnapWriter *pMetaSnapWriter;
// tsdb
STsdbSnapWriter *pTsdbSnapWriter;
+ // rsma
+ SRsmaSnapWriter *pRsmaSnapWriter;
};
int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWriter **ppWriter) {
@@ -235,6 +228,11 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
if (code) goto _err;
}
+ if (pWriter->pRsmaSnapWriter) {
+ code = rsmaSnapWriterClose(&pWriter->pRsmaSnapWriter, rollback);
+ if (code) goto _err;
+ }
+
if (!rollback) {
SVnodeInfo info = {0};
char dir[TSDB_FILENAME_LEN];
@@ -282,28 +280,51 @@ int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) {
vInfo("vgId:%d vnode snapshot write data, index:%" PRId64 " type:%d nData:%d", TD_VID(pVnode), pHdr->index,
pHdr->type, nData);
- if (pHdr->type == 0) {
- // meta
+ switch (pHdr->type) {
+ case SNAP_DATA_META: {
+ // meta
+ if (pWriter->pMetaSnapWriter == NULL) {
+ code = metaSnapWriterOpen(pVnode->pMeta, pWriter->sver, pWriter->ever, &pWriter->pMetaSnapWriter);
+ if (code) goto _err;
+ }
- if (pWriter->pMetaSnapWriter == NULL) {
- code = metaSnapWriterOpen(pVnode->pMeta, pWriter->sver, pWriter->ever, &pWriter->pMetaSnapWriter);
+ code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData);
if (code) goto _err;
- }
+ } break;
+ case SNAP_DATA_TSDB: {
+ // tsdb
+ if (pWriter->pTsdbSnapWriter == NULL) {
+ code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter);
+ if (code) goto _err;
+ }
- code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData);
- if (code) goto _err;
- } else {
- // tsdb
-
- if (pWriter->pTsdbSnapWriter == NULL) {
- code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter);
+ code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData);
if (code) goto _err;
- }
+ } break;
+ case SNAP_DATA_RSMA1:
+ case SNAP_DATA_RSMA2: {
+ // rsma1/rsma2
+ if (pWriter->pRsmaSnapWriter == NULL) {
+ code = rsmaSnapWriterOpen(pVnode->pSma, pWriter->sver, pWriter->ever, &pWriter->pRsmaSnapWriter);
+ if (code) goto _err;
+ }
- code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData);
- if (code) goto _err;
+ code = rsmaSnapWrite(pWriter->pRsmaSnapWriter, pData, nData);
+ if (code) goto _err;
+ } break;
+ case SNAP_DATA_QTASK: {
+ // qtask for rsma
+ if (pWriter->pRsmaSnapWriter == NULL) {
+ code = rsmaSnapWriterOpen(pVnode->pSma, pWriter->sver, pWriter->ever, &pWriter->pRsmaSnapWriter);
+ if (code) goto _err;
+ }
+
+ code = rsmaSnapWrite(pWriter->pRsmaSnapWriter, pData, nData);
+ if (code) goto _err;
+ } break;
+ default:
+ break;
}
-
_exit:
return code;
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index a83e1ab85b..c1ce90d4ed 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -447,6 +447,7 @@ _err:
static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
SDecoder decoder = {0};
+ SEncoder encoder = {0};
int32_t rcode = 0;
SVCreateTbBatchReq req = {0};
SVCreateTbReq *pCreateReq;
@@ -515,7 +516,6 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
tdUidStoreFree(pStore);
// prepare rsp
- SEncoder encoder = {0};
int32_t ret = 0;
tEncodeSize(tEncodeSVCreateTbBatchRsp, &rsp, pRsp->contLen, ret);
pRsp->pCont = rpcMallocCont(pRsp->contLen);
diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h
index 1aaa1ecfd7..5b5c6010e8 100644
--- a/source/libs/catalog/inc/catalogInt.h
+++ b/source/libs/catalog/inc/catalogInt.h
@@ -679,6 +679,8 @@ void ctgClearSubTaskRes(SCtgSubRes *pRes);
void ctgFreeQNode(SCtgQNode *node);
void ctgClearHandle(SCatalog* pCtg);
void ctgFreeTbCacheImpl(SCtgTbCache *pCache);
+int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName);
+int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup);
extern SCatalogMgmt gCtgMgmt;
diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c
index 59f11898fa..933e65e582 100644
--- a/source/libs/catalog/src/catalog.c
+++ b/source/libs/catalog/src/catalog.c
@@ -92,7 +92,7 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx*
int32_t code = 0;
if (!CTG_FLAG_IS_SYS_DB(ctx->flag)) {
- CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pConn, ctx->pName, &vgroupInfo));
+ CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, ctx->pName, &vgroupInfo));
}
STableMetaOutput moutput = {0};
@@ -337,7 +337,10 @@ int32_t ctgGetTbType(SCatalog* pCtg, SRequestConnInfo *pConn, SName* pTableName,
}
STableMeta* pMeta = NULL;
- CTG_ERR_RET(catalogGetTableMeta(pCtg, pConn, pTableName, &pMeta));
+ SCtgTbMetaCtx ctx = {0};
+ ctx.pName = (SName*)pTableName;
+ ctx.flag = CTG_FLAG_UNKNOWN_STB;
+ CTG_ERR_RET(ctgGetTbMeta(pCtg, pConn, &ctx, &pMeta));
*tbType = pMeta->tableType;
taosMemoryFree(pMeta);
@@ -391,7 +394,7 @@ int32_t ctgGetTbCfg(SCatalog* pCtg, SRequestConnInfo *pConn, SName* pTableName,
CTG_ERR_RET(ctgGetTableCfgFromMnode(pCtg, pConn, pTableName, pCfg, NULL));
} else {
SVgroupInfo vgroupInfo = {0};
- CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pConn, pTableName, &vgroupInfo));
+ CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, pTableName, &vgroupInfo));
CTG_ERR_RET(ctgGetTableCfgFromVnode(pCtg, pConn, pTableName, &vgroupInfo, pCfg, NULL));
}
@@ -477,6 +480,57 @@ _return:
CTG_RET(code);
}
+
+int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) {
+ if (IS_SYS_DBNAME(pTableName->dbname)) {
+ ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
+ CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
+ }
+
+ SCtgDBCache* dbCache = NULL;
+ int32_t code = 0;
+ char db[TSDB_DB_FNAME_LEN] = {0};
+ tNameGetFullDbName(pTableName, db);
+
+ SDBVgInfo *vgInfo = NULL;
+ CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo));
+
+ CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
+
+_return:
+
+ if (dbCache) {
+ ctgRUnlockVgInfo(dbCache);
+ ctgReleaseDBCache(pCtg, dbCache);
+ }
+
+ if (vgInfo) {
+ taosHashCleanup(vgInfo->vgHash);
+ taosMemoryFreeClear(vgInfo);
+ }
+
+ CTG_RET(code);
+}
+
+int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName) {
+ int32_t code = 0;
+
+ if (NULL == pCtg || NULL == pTableName) {
+ CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
+ }
+
+ if (NULL == pCtg->dbCache) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true));
+
+_return:
+
+ CTG_RET(code);
+}
+
+
int32_t catalogInit(SCatalogCfg* cfg) {
if (gCtgMgmt.pCluster) {
qError("catalog already initialized");
@@ -772,21 +826,7 @@ _return:
int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName) {
CTG_API_ENTER();
- int32_t code = 0;
-
- if (NULL == pCtg || NULL == pTableName) {
- CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
- }
-
- if (NULL == pCtg->dbCache) {
- CTG_API_LEAVE(TSDB_CODE_SUCCESS);
- }
-
- CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true));
-
-_return:
-
- CTG_API_LEAVE(code);
+ CTG_API_LEAVE(ctgRemoveTbMeta(pCtg, pTableName));
}
int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, const char* stbName, uint64_t suid) {
@@ -878,12 +918,12 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, SRequestConnInfo *pConn, SArray*
case TSDB_CHILD_TABLE: {
SName stb = name;
strcpy(stb.tname, stbName);
- catalogRemoveTableMeta(pCtg, &stb);
+ ctgRemoveTbMeta(pCtg, &stb);
break;
}
case TSDB_SUPER_TABLE:
case TSDB_NORMAL_TABLE:
- catalogRemoveTableMeta(pCtg, &name);
+ ctgRemoveTbMeta(pCtg, &name);
break;
default:
ctgError("ignore table type %d", tbType);
@@ -947,34 +987,7 @@ int32_t catalogGetTableDistVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const
int32_t catalogGetTableHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) {
CTG_API_ENTER();
- if (IS_SYS_DBNAME(pTableName->dbname)) {
- ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
- CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
- }
-
- SCtgDBCache* dbCache = NULL;
- int32_t code = 0;
- char db[TSDB_DB_FNAME_LEN] = {0};
- tNameGetFullDbName(pTableName, db);
-
- SDBVgInfo *vgInfo = NULL;
- CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo));
-
- CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
-
-_return:
-
- if (dbCache) {
- ctgRUnlockVgInfo(dbCache);
- ctgReleaseDBCache(pCtg, dbCache);
- }
-
- if (vgInfo) {
- taosHashCleanup(vgInfo->vgHash);
- taosMemoryFreeClear(vgInfo);
- }
-
- CTG_API_LEAVE(code);
+ CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, pConn, pTableName, pVgroup));
}
int32_t catalogGetAllMeta(SCatalog* pCtg, SRequestConnInfo *pConn, const SCatalogReq* pReq, SMetaData* pRsp) {
@@ -1200,7 +1213,7 @@ int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo *pConn, const
}
int32_t code = 0;
- CTG_ERR_JRET(catalogRemoveTableMeta(pCtg, (SName*)pTableName));
+ CTG_ERR_JRET(ctgRemoveTbMeta(pCtg, (SName*)pTableName));
CTG_ERR_JRET(ctgGetTbCfg(pCtg, pConn, (SName*)pTableName, pCfg));
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index f4cee13ec0..0184ac3a60 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -398,7 +398,7 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con
SName* name = taosHashIterate(pTb, NULL);
while (name) {
- catalogRemoveTableMeta(pCtg, name);
+ ctgRemoveTbMeta(pCtg, name);
name = taosHashIterate(pTb, name);
}
diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h
index f60c518d73..104c007756 100644
--- a/source/libs/command/inc/commandInt.h
+++ b/source/libs/command/inc/commandInt.h
@@ -19,6 +19,8 @@
#ifdef __cplusplus
extern "C" {
#endif
+
+// clang-format off
#include "nodes.h"
#include "plannodes.h"
#include "ttime.h"
@@ -77,6 +79,8 @@ extern "C" {
#define EXPLAIN_EXECINFO_FORMAT "cost=%.3f..%.3f rows=%" PRIu64
#define EXPLAIN_MODE_FORMAT "mode=%s"
#define EXPLAIN_STRING_TYPE_FORMAT "%s"
+#define EXPLAIN_INPUT_ORDER_FORMAT "input_order=%s"
+#define EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT "output_order=%s"
#define COMMAND_RESET_LOG "resetLog"
#define COMMAND_SCHEDULE_POLICY "schedulePolicy"
@@ -122,7 +126,7 @@ typedef struct SExplainCtx {
SHashObj *groupHash; // Hash
} SExplainCtx;
-#define EXPLAIN_ORDER_STRING(_order) ((TSDB_ORDER_ASC == _order) ? "Ascending" : "Descending")
+#define EXPLAIN_ORDER_STRING(_order) ((ORDER_ASC == _order) ? "asc" : "desc")
#define EXPLAIN_JOIN_STRING(_type) ((JOIN_TYPE_INNER == _type) ? "Inner join" : "Join")
#define INVERAL_TIME_FROM_PRECISION_TO_UNIT(_t, _u, _p) (((_u) == 'n' || (_u) == 'y') ? (_t) : (convertTimeFromPrecisionToUnit(_t, _p, _u)))
diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c
index 266f96b41e..66a94f7e28 100644
--- a/source/libs/command/src/explain.c
+++ b/source/libs/command/src/explain.c
@@ -13,6 +13,7 @@
* along with this program. If not, see .
*/
+// clang-format off
#include "commandInt.h"
#include "plannodes.h"
#include "query.h"
@@ -849,6 +850,10 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pIntNode->window.pFuncs->length);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->totalRowSize);
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ EXPLAIN_ROW_APPEND(EXPLAIN_INPUT_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.inputTsOrder));
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ EXPLAIN_ROW_APPEND(EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT, EXPLAIN_ORDER_STRING(pIntNode->window.outputTsOrder));
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
@@ -1154,7 +1159,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_MERGE_KEYS_FORMAT);
for (int32_t i = 0; i < LIST_LENGTH(pMergeNode->pMergeKeys); ++i) {
SOrderByExprNode *ptn = (SOrderByExprNode *)nodesListGetNode(pMergeNode->pMergeKeys, i);
- EXPLAIN_ROW_APPEND("%s ", nodesGetNameFromColumnNode(ptn->pExpr));
+ EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, nodesGetNameFromColumnNode(ptn->pExpr));
+ EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
+ EXPLAIN_ROW_APPEND(EXPLAIN_STRING_TYPE_FORMAT, EXPLAIN_ORDER_STRING(ptn->order));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h
index be97b20455..c3dad1ed7c 100644
--- a/source/libs/executor/inc/executil.h
+++ b/source/libs/executor/inc/executil.h
@@ -103,7 +103,7 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo);
void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList);
-bool hasDataInGroupInfo(SGroupResInfo* pGroupResInfo);
+bool hasRemainResults(SGroupResInfo* pGroupResInfo);
int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo);
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index a80c2c2fea..a70e7cd1dd 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -297,6 +297,20 @@ enum {
TABLE_SCAN__BLOCK_ORDER = 2,
};
+typedef struct SAggSupporter {
+ SHashObj* pResultRowHashTable; // quick locate the window object for each result
+ char* keyBuf; // window key buffer
+ SDiskbasedBuf* pResultBuf; // query result buffer based on block-wise disk file
+ int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+} SAggSupporter;
+
+typedef struct {
+ // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
+ SInterval interval;
+ SAggSupporter *pAggSup;
+ SExprSupp *pExprSup; // expr supporter of aggregate operator
+} SAggOptrPushDownInfo;
+
typedef struct STableScanInfo {
STsdbReader* dataReader;
SReadHandle readHandle;
@@ -312,12 +326,13 @@ typedef struct STableScanInfo {
SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t dataBlockLoadFlag;
- SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
+// SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
SSampleExecInfo sample; // sample execution info
int32_t currentGroupId;
int32_t currentTable;
int8_t scanMode;
int8_t noTable;
+ SAggOptrPushDownInfo pdInfo;
int8_t assignBlockUid;
} STableScanInfo;
@@ -505,13 +520,6 @@ typedef struct SOptrBasicInfo {
SSDataBlock* pRes;
} SOptrBasicInfo;
-typedef struct SAggSupporter {
- SHashObj* pResultRowHashTable; // quick locate the window object for each result
- char* keyBuf; // window key buffer
- SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
- int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
-} SAggSupporter;
-
typedef struct SIntervalAggOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo; // basic info
@@ -523,7 +531,8 @@ typedef struct SIntervalAggOperatorInfo {
STimeWindow win; // query time range
bool timeWindowInterpo; // interpolation needed or not
SArray* pInterpCols; // interpolation columns
- int32_t order; // current SSDataBlock scan order
+ int32_t resultTsOrder; // result timestamp order
+ int32_t inputOrder; // input data ts order
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
STimeWindowAggSupp twAggSup;
bool invertible;
@@ -533,8 +542,7 @@ typedef struct SIntervalAggOperatorInfo {
SArray* pDelWins; // SWinRes
int32_t delIndex;
SSDataBlock* pDelRes;
-
- SNode *pCondition;
+ SNode* pCondition;
} SIntervalAggOperatorInfo;
typedef struct SMergeAlignedIntervalAggOperatorInfo {
@@ -804,7 +812,7 @@ typedef struct STagFilterOperatorInfo {
typedef struct SJoinOperatorInfo {
SSDataBlock *pRes;
int32_t joinType;
- int32_t inputTsOrder;
+ int32_t inputOrder;
SSDataBlock *pLeft;
int32_t leftPos;
@@ -847,7 +855,6 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWin
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
SArray* pColList);
-STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
@@ -978,9 +985,8 @@ int32_t decodeOperator(SOperatorInfo* ops, const char* data, int32_t length);
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
const char* sql, EOPTR_EXEC_MODEL model);
-int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
-int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity,
- int32_t* resNum);
+int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
+int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList);
int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result);
int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length);
@@ -1019,6 +1025,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex);
+bool groupbyTbname(SNodeList* pGroupList);
int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* groupKey);
SSDataBlock* createSpecialDataBlock(EStreamType type);
void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput);
diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h
index 0349632b9a..b604794dad 100644
--- a/source/libs/executor/inc/tfill.h
+++ b/source/libs/executor/inc/tfill.h
@@ -74,9 +74,9 @@ void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struct SSDataB
struct SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, const struct SNodeListNode* val);
bool taosFillHasMoreResults(struct SFillInfo* pFillInfo);
-SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
+SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t slotId,
- const char* id);
+ int32_t order, const char* id);
void* taosDestroyFillInfo(struct SFillInfo *pFillInfo);
int64_t taosFillResultDataBlock(struct SFillInfo* pFillInfo, SSDataBlock* p, int32_t capacity);
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 65df7140f7..96c20d6136 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -137,7 +137,7 @@ void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayL
ASSERT(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo));
}
-bool hasDataInGroupInfo(SGroupResInfo* pGroupResInfo) {
+bool hasRemainResults(SGroupResInfo* pGroupResInfo) {
if (pGroupResInfo->pRows == NULL) {
return false;
}
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 171f39db1b..8f3ecafbea 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -242,25 +242,17 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}
// todo refactor STableList
+ bool assignUid = false;
size_t bufLen = (pScanInfo->pGroupTags != NULL) ? getTableTagsBufLen(pScanInfo->pGroupTags) : 0;
char* keyBuf = NULL;
if (bufLen > 0) {
+ assignUid = groupbyTbname(pScanInfo->pGroupTags);
keyBuf = taosMemoryMalloc(bufLen);
if (keyBuf == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
- bool assignUid = false;
-
- if (LIST_LENGTH(pScanInfo->pGroupTags) > 0) {
- SNode* p = nodesListGetNode(pScanInfo->pGroupTags, 0);
- if (p->type == QUERY_NODE_FUNCTION) {
- // partition by tbname/group by tbname
- assignUid = (strcmp(((struct SFunctionNode*)p)->functionName, "tbname") == 0);
- }
- }
-
for (int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
uint64_t* uid = taosArrayGet(qa, i);
STableKeyInfo keyInfo = {.uid = *uid, .groupId = 0};
@@ -504,11 +496,9 @@ void qDestroyTask(qTaskInfo_t qTaskHandle) {
doDestroyTask(pTaskInfo);
}
-int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes) {
+int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
- int32_t capacity = 0;
-
- return getOperatorExplainExecInfo(pTaskInfo->pRoot, pRes, &capacity, resNum);
+ return getOperatorExplainExecInfo(pTaskInfo->pRoot, pExecInfoList);
}
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len) {
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 525d7bf336..084fbbea7c 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -141,8 +141,7 @@ static int32_t doCopyToSDataBlock(SExecTaskInfo* taskInfo, SSDataBlock* pBlock,
SqlFunctionCtx* pCtx, int32_t numOfExprs);
static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size);
-static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, SAggOperatorInfo* pAggInfo, int32_t numOfOutput,
- uint64_t groupId);
+static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
// setup the output buffer for each operator
static bool hasNull(SColumn* pColumn, SColumnDataAgg* pStatis) {
@@ -1393,10 +1392,11 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowR
}
}
-void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, SAggOperatorInfo* pAggInfo, int32_t numOfOutput,
- uint64_t groupId) {
+void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId) {
// for simple group by query without interval, all the tables belong to one group result.
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SAggOperatorInfo* pAggInfo = pOperator->info;
+
SResultRowInfo* pResultRowInfo = &pAggInfo->binfo.resultRowInfo;
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
int32_t* rowEntryInfoOffset = pOperator->exprSupp.rowEntryInfoOffset;
@@ -1420,14 +1420,13 @@ void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, SAggOperatorInfo* pAggIn
setResultRowInitCtx(pResultRow, pCtx, numOfOutput, rowEntryInfoOffset);
}
-void setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId, SAggOperatorInfo* pAggInfo) {
- if (pAggInfo->groupId != INT32_MIN && pAggInfo->groupId == groupId) {
+static void setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId) {
+ SAggOperatorInfo* pAggInfo = pOperator->info;
+ if (pAggInfo->groupId != UINT64_MAX && pAggInfo->groupId == groupId) {
return;
}
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_setbuf, groupId:%" PRIu64, groupId);
-#endif
- doSetTableGroupOutputBuf(pOperator, pAggInfo, numOfOutput, groupId);
+
+ doSetTableGroupOutputBuf(pOperator, numOfOutput, groupId);
// record the current active group id
pAggInfo->groupId = groupId;
@@ -1594,7 +1593,7 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
pBlock->info.version = pTaskInfo->version;
blockDataCleanup(pBlock);
- if (!hasDataInGroupInfo(pGroupResInfo)) {
+ if (!hasRemainResults(pGroupResInfo)) {
return;
}
@@ -2931,7 +2930,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
}
// the pDataBlock are always the same one, no need to call this again
- setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.groupId, pAggInfo);
+ setExecutionContext(pOperator, pOperator->exprSupp.numOfExprs, pBlock->info.groupId);
setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, true);
code = doAggregateImpl(pOperator, pSup->pCtx);
if (code != 0) {
@@ -2966,7 +2965,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
doBuildResultDatablock(pOperator, pInfo, &pAggInfo->groupResInfo, pAggInfo->aggSup.pResultBuf);
doFilter(pAggInfo->pCondition, pInfo->pRes, NULL);
- if (!hasDataInGroupInfo(&pAggInfo->groupResInfo)) {
+ if (!hasRemainResults(&pAggInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
break;
}
@@ -3501,7 +3500,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
goto _error;
}
- pInfo->groupId = INT32_MIN;
+ pInfo->groupId = UINT64_MAX;
pInfo->pCondition = pCondition;
pOperator->name = "TableAggregate";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_AGG;
@@ -3513,6 +3512,12 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pOperator->fpSet = createOperatorFpSet(doOpenAggregateOptr, getAggregateResult, NULL, NULL, destroyAggOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
+ if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
+ STableScanInfo* pTableScanInfo = downstream->info;
+ pTableScanInfo->pdInfo.pExprSup = &pOperator->exprSupp;
+ pTableScanInfo->pdInfo.pAggSup = &pInfo->aggSup;
+ }
+
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@@ -3586,15 +3591,14 @@ void doDestroyExchangeOperatorInfo(void* param) {
}
static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t numOfCols, SNodeListNode* pValNode,
- STimeWindow win, int32_t capacity, const char* id, SInterval* pInterval, int32_t fillType) {
+ STimeWindow win, int32_t capacity, const char* id, SInterval* pInterval, int32_t fillType, int32_t order) {
SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pValNode);
STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, win.skey);
w = getFirstQualifiedTimeWindow(win.skey, &w, pInterval, TSDB_ORDER_ASC);
- int32_t order = TSDB_ORDER_ASC;
pInfo->pFillInfo =
- taosCreateFillInfo(order, w.skey, 0, capacity, numOfCols, pInterval, fillType, pColInfo, pInfo->primaryTsCol, id);
+ taosCreateFillInfo(w.skey, 0, capacity, numOfCols, pInterval, fillType, pColInfo, pInfo->primaryTsCol, order, id);
pInfo->win = win;
pInfo->p = taosMemoryCalloc(numOfCols, POINTER_BYTES);
@@ -3624,6 +3628,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval
: &((SIntervalAggOperatorInfo*)downstream->info)->interval;
+ int32_t order = (pPhyFillNode->inputTsOrder == ORDER_ASC)? TSDB_ORDER_ASC:TSDB_ORDER_DESC;
int32_t type = convertFillType(pPhyFillNode->mode);
SResultInfo* pResultInfo = &pOperator->resultInfo;
@@ -3635,7 +3640,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
&numOfOutputCols, COL_MATCH_FROM_SLOT_ID);
int32_t code = initFillInfo(pInfo, pExprInfo, num, (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange,
- pResultInfo->capacity, pTaskInfo->id.str, pInterval, type);
+ pResultInfo->capacity, pTaskInfo->id.str, pInterval, type, order);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -3825,6 +3830,19 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo, int32_t groupNum)
return TDB_CODE_SUCCESS;
}
+bool groupbyTbname(SNodeList* pGroupList) {
+ bool bytbname = false;
+ if (LIST_LENGTH(pGroupList) > 0) {
+ SNode* p = nodesListGetNode(pGroupList, 0);
+ if (p->type == QUERY_NODE_FUNCTION) {
+ // partition by tbname/group by tbname
+ bytbname = (strcmp(((struct SFunctionNode*)p)->functionName, "tbname") == 0);
+ }
+ }
+
+ return bytbname;
+}
+
int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* group) {
if (group == NULL) {
return TDB_CODE_SUCCESS;
@@ -3851,12 +3869,21 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
return TSDB_CODE_OUT_OF_MEMORY;
}
+ bool assignUid = groupbyTbname(group);
+
int32_t groupNum = 0;
- for (int32_t i = 0; i < taosArrayGetSize(pTableListInfo->pTableList); i++) {
+ size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList);
+
+ for (int32_t i = 0; i < numOfTables; i++) {
STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i);
- int32_t code = getGroupIdFromTagsVal(pHandle->meta, info->uid, group, keyBuf, &info->groupId);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
+
+ if (assignUid) {
+ info->groupId = info->uid;
+ } else {
+ int32_t code = getGroupIdFromTagsVal(pHandle->meta, info->uid, group, keyBuf, &info->groupId);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
}
taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t));
@@ -4586,42 +4613,29 @@ void releaseQueryBuf(size_t numOfTables) {
atomic_add_fetch_64(&tsQueryBufferSizeBytes, t);
}
-int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity,
- int32_t* resNum) {
- if (*resNum >= *capacity) {
- *capacity += 10;
+int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList) {
+ SExplainExecInfo execInfo = {0};
+ SExplainExecInfo* pExplainInfo = taosArrayPush(pExecInfoList, &execInfo);
- *pRes = taosMemoryRealloc(*pRes, (*capacity) * sizeof(SExplainExecInfo));
- if (NULL == *pRes) {
- qError("malloc %d failed", (*capacity) * (int32_t)sizeof(SExplainExecInfo));
- return TSDB_CODE_QRY_OUT_OF_MEMORY;
- }
- }
-
- SExplainExecInfo* pInfo = &(*pRes)[*resNum];
-
- pInfo->numOfRows = operatorInfo->resultInfo.totalRows;
- pInfo->startupCost = operatorInfo->cost.openCost;
- pInfo->totalCost = operatorInfo->cost.totalCost;
+ pExplainInfo->numOfRows = operatorInfo->resultInfo.totalRows;
+ pExplainInfo->startupCost = operatorInfo->cost.openCost;
+ pExplainInfo->totalCost = operatorInfo->cost.totalCost;
+ pExplainInfo->verboseLen = 0;
+ pExplainInfo->verboseInfo = NULL;
if (operatorInfo->fpSet.getExplainFn) {
- int32_t code = operatorInfo->fpSet.getExplainFn(operatorInfo, &pInfo->verboseInfo, &pInfo->verboseLen);
+ int32_t code = operatorInfo->fpSet.getExplainFn(operatorInfo, &pExplainInfo->verboseInfo, &pExplainInfo->verboseLen);
if (code) {
qError("%s operator getExplainFn failed, code:%s", GET_TASKID(operatorInfo->pTaskInfo), tstrerror(code));
return code;
}
- } else {
- pInfo->verboseLen = 0;
- pInfo->verboseInfo = NULL;
}
- ++(*resNum);
-
int32_t code = 0;
for (int32_t i = 0; i < operatorInfo->numOfDownstream; ++i) {
- code = getOperatorExplainExecInfo(operatorInfo->pDownstream[i], pRes, capacity, resNum);
- if (code) {
- taosMemoryFreeClear(*pRes);
+ code = getOperatorExplainExecInfo(operatorInfo->pDownstream[i], pExecInfoList);
+ if (code != TSDB_CODE_SUCCESS) {
+// taosMemoryFreeClear(*pRes);
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index c83206b730..5690389302 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -31,14 +31,21 @@ static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity
static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t bytes,
uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
+static void freeGroupKey(void* param) {
+ SGroupKeys* pKey = (SGroupKeys*) param;
+ taosMemoryFree(pKey->pData);
+}
+
static void destroyGroupOperatorInfo(void* param, int32_t numOfOutput) {
SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
taosMemoryFreeClear(pInfo->keyBuf);
taosArrayDestroy(pInfo->pGroupCols);
- taosArrayDestroy(pInfo->pGroupColVals);
+ taosArrayDestroyEx(pInfo->pGroupColVals, freeGroupKey);
cleanupExprSupp(&pInfo->scalarSup);
-
+
+ cleanupGroupResInfo(&pInfo->groupResInfo);
+ cleanupAggSup(&pInfo->aggSup);
taosMemoryFreeClear(param);
}
@@ -301,8 +308,7 @@ static SSDataBlock* buildGroupResultDataBlock(SOperatorInfo* pOperator) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pRes, NULL);
- bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
- if (!hasRemain) {
+ if (!hasRemainResults(&pInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
break;
}
@@ -415,8 +421,6 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
// pOperator->operatorType = OP_Groupby;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
index 8902804fab..17b81cdb82 100644
--- a/source/libs/executor/src/joinoperator.c
+++ b/source/libs/executor/src/joinoperator.c
@@ -77,11 +77,11 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
pInfo->pCondAfterMerge = NULL;
}
- pInfo->inputTsOrder = TSDB_ORDER_ASC;
+ pInfo->inputOrder = TSDB_ORDER_ASC;
if (pJoinNode->inputTsOrder == ORDER_ASC) {
- pInfo->inputTsOrder = TSDB_ORDER_ASC;
+ pInfo->inputOrder = TSDB_ORDER_ASC;
} else if (pJoinNode->inputTsOrder == ORDER_DESC) {
- pInfo->inputTsOrder = TSDB_ORDER_DESC;
+ pInfo->inputOrder = TSDB_ORDER_DESC;
}
pOperator->fpSet =
@@ -312,7 +312,7 @@ static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes)
int32_t nrows = pRes->info.rows;
- bool asc = (pJoinInfo->inputTsOrder == TSDB_ORDER_ASC) ? true : false;
+ bool asc = (pJoinInfo->inputOrder == TSDB_ORDER_ASC) ? true : false;
while (1) {
int64_t leftTs = 0;
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 4a2f57d628..fc67f3da6c 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -166,6 +166,67 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
return false;
}
+// this function is for table scanner to extract temporary results of upstream aggregate results.
+static SResultRow* getTableGroupOutputBuf(SOperatorInfo* pOperator, uint64_t groupId, SFilePage** pPage) {
+ if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
+ return NULL;
+ }
+
+ int64_t buf[2] = {0};
+ SET_RES_WINDOW_KEY((char*)buf, &groupId, sizeof(groupId), groupId);
+
+ STableScanInfo* pTableScanInfo = pOperator->info;
+
+ SResultRowPosition* p1 =
+ (SResultRowPosition*)taosHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf, GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
+
+ if (p1 == NULL) {
+ return NULL;
+ }
+
+ *pPage = getBufPage(pTableScanInfo->pdInfo.pAggSup->pResultBuf, p1->pageId);
+ return (SResultRow*)((char*)(*pPage) + p1->offset);
+}
+
+static int32_t doDynamicPruneDataBlock(SOperatorInfo* pOperator, SDataBlockInfo* pBlockInfo, uint32_t* status) {
+ STableScanInfo* pTableScanInfo = pOperator->info;
+
+ if (pTableScanInfo->pdInfo.pExprSup == NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SExprSupp* pSup1 = pTableScanInfo->pdInfo.pExprSup;
+
+ SFilePage* pPage = NULL;
+ SResultRow* pRow = getTableGroupOutputBuf(pOperator, pBlockInfo->groupId, &pPage);
+
+ if (pRow == NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ bool notLoadBlock = true;
+ for (int32_t i = 0; i < pSup1->numOfExprs; ++i) {
+ int32_t functionId = pSup1->pCtx[i].functionId;
+
+ SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, i, pTableScanInfo->pdInfo.pExprSup->rowEntryInfoOffset);
+
+ int32_t reqStatus = fmFuncDynDataRequired(functionId, pEntry, &pBlockInfo->window);
+ if (reqStatus != FUNC_DATA_REQUIRED_NOT_LOAD) {
+ notLoadBlock = false;
+ break;
+ }
+ }
+
+ // release buffer pages
+ releaseBufPage(pTableScanInfo->pdInfo.pAggSup->pResultBuf, pPage);
+
+ if (notLoadBlock) {
+ *status = FUNC_DATA_REQUIRED_NOT_LOAD;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
uint32_t* status) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -178,7 +239,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
*status = pInfo->dataBlockLoadFlag;
if (pTableScanInfo->pFilterNode != NULL ||
- overlapWithTimeWindow(&pTableScanInfo->interval, &pBlock->info, pTableScanInfo->cond.order)) {
+ overlapWithTimeWindow(&pTableScanInfo->pdInfo.interval, &pBlock->info, pTableScanInfo->cond.order)) {
(*status) = FUNC_DATA_REQUIRED_DATA_LOAD;
}
@@ -232,6 +293,16 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
ASSERT(*status == FUNC_DATA_REQUIRED_DATA_LOAD);
// todo filter data block according to the block sma data firstly
+
+ doDynamicPruneDataBlock(pOperator, pBlockInfo, status);
+ if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) {
+ qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
+ pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
+ pCost->skipBlocks += 1;
+
+ return TSDB_CODE_SUCCESS;
+ }
+
#if 0
if (!doFilterByBlockStatistics(pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) {
pCost->filterOutBlocks += 1;
@@ -263,18 +334,20 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
}
}
- int64_t st = taosGetTimestampMs();
- doFilter(pTableScanInfo->pFilterNode, pBlock, pTableScanInfo->pColMatchInfo);
+ if (pTableScanInfo->pFilterNode != NULL) {
+ int64_t st = taosGetTimestampUs();
+ doFilter(pTableScanInfo->pFilterNode, pBlock, pTableScanInfo->pColMatchInfo);
- int64_t et = taosGetTimestampMs();
- pTableScanInfo->readRecorder.filterTime += (et - st);
+ double el = (taosGetTimestampUs() - st) / 1000.0;
+ pTableScanInfo->readRecorder.filterTime += el;
- if (pBlock->info.rows == 0) {
- pCost->filterOutBlocks += 1;
- qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
- pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
- } else {
- qDebug("%s data block filter out, elapsed time:%" PRId64, GET_TASKID(pTaskInfo), (et - st));
+ if (pBlock->info.rows == 0) {
+ pCost->filterOutBlocks += 1;
+ qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d, elapsed time:%.2f ms",
+ GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, el);
+ } else {
+ qDebug("%s data block filter applied, elapsed time:%.2f ms", GET_TASKID(pTaskInfo), el);
+ }
}
return TSDB_CODE_SUCCESS;
@@ -607,10 +680,11 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
}
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
- // pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose
+// pInfo->scanInfo = (SScanInfo){.numOfAsc = 0, .numOfDesc = 1}; // for debug purpose
+// pInfo->cond.order = TSDB_ORDER_DESC;
+ pInfo->pdInfo.interval = extractIntervalInfo(pTableScanNode);
pInfo->readHandle = *readHandle;
- pInfo->interval = extractIntervalInfo(pTableScanNode);
pInfo->sample.sampleRatio = pTableScanNode->ratio;
pInfo->sample.seed = taosGetTimestampSec();
@@ -1489,14 +1563,14 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->tqReader = pHandle->tqReader;
}
- if (pTSInfo->interval.interval > 0) {
- pInfo->pUpdateInfo = updateInfoInitP(&pTSInfo->interval, pInfo->twAggSup.waterMark);
+ if (pTSInfo->pdInfo.interval.interval > 0) {
+ pInfo->pUpdateInfo = updateInfoInitP(&pTSInfo->pdInfo.interval, pInfo->twAggSup.waterMark);
} else {
pInfo->pUpdateInfo = NULL;
}
pInfo->pTableScanOp = pTableScanOp;
- pInfo->interval = pTSInfo->interval;
+ pInfo->interval = pTSInfo->pdInfo.interval;
pInfo->readHandle = *pHandle;
pInfo->tableUid = pScanPhyNode->uid;
@@ -2672,16 +2746,20 @@ static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeSc
}
}
- int64_t st = taosGetTimestampMs();
- doFilter(pTableScanInfo->pFilterNode, pBlock, pTableScanInfo->pColMatchInfo);
+ if (pTableScanInfo->pFilterNode != NULL) {
+ int64_t st = taosGetTimestampMs();
+ doFilter(pTableScanInfo->pFilterNode, pBlock, pTableScanInfo->pColMatchInfo);
- int64_t et = taosGetTimestampMs();
- pTableScanInfo->readRecorder.filterTime += (et - st);
+ double el = (taosGetTimestampUs() - st) / 1000.0;
+ pTableScanInfo->readRecorder.filterTime += el;
- if (pBlock->info.rows == 0) {
- pCost->filterOutBlocks += 1;
- qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
- pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
+ if (pBlock->info.rows == 0) {
+ pCost->filterOutBlocks += 1;
+ qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d, elapsed time:%.2f ms",
+ GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, el);
+ } else {
+ qDebug("%s data block filter applied, elapsed time:%.2f ms", GET_TASKID(pTaskInfo), el);
+ }
}
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c
index ff856a48b6..c5d68676d2 100644
--- a/source/libs/executor/src/tfill.c
+++ b/source/libs/executor/src/tfill.c
@@ -66,7 +66,7 @@ static void setNullRow(SSDataBlock* pBlock, int64_t ts, int32_t rowIndex) {
static void doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey);
-static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32_t rowIndex, int64_t currentKey) {
+static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32_t rowIndex, int64_t currentKey) {
if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) {
float v = 0;
GET_TYPED_DATA(v, float, pVar->nType, &pVar->i);
@@ -184,7 +184,7 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
continue;
}
- SVariant* pVar = &pFillInfo->pFillCol[i].fillVal;
+ SVariant* pVar = &pFillInfo->pFillCol[i].fillVal;
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, i);
doSetUserSpecifiedValue(pDst, pVar, index, pFillInfo->currentKey);
}
@@ -298,7 +298,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t
SColumnInfoData* pSrc = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, srcSlotId);
char* src = colDataGetData(pSrc, pFillInfo->index);
- if (/*i == 0 || (*/!colDataIsNull_s(pSrc, pFillInfo->index)) {
+ if (/*i == 0 || (*/ !colDataIsNull_s(pSrc, pFillInfo->index)) {
bool isNull = colDataIsNull_s(pSrc, pFillInfo->index);
colDataAppend(pDst, index, src, isNull);
saveColData(pFillInfo->prev, i, src, isNull);
@@ -313,7 +313,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, SSDataBlock* pBlock, int32_t
} else if (pFillInfo->type == TSDB_FILL_LINEAR) {
bool isNull = colDataIsNull_s(pSrc, pFillInfo->index);
colDataAppend(pDst, index, src, isNull);
- saveColData(pFillInfo->prev, i, src, isNull); // todo:
+ saveColData(pFillInfo->prev, i, src, isNull); // todo:
} else if (pFillInfo->type == TSDB_FILL_NULL) {
colDataAppendNULL(pDst, index);
} else if (pFillInfo->type == TSDB_FILL_NEXT) {
@@ -433,9 +433,9 @@ static int32_t taosNumOfRemainRows(SFillInfo* pFillInfo) {
return pFillInfo->numOfRows - pFillInfo->index;
}
-struct SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
- SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t primaryTsSlotId,
- const char* id) {
+struct SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
+ SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol,
+ int32_t primaryTsSlotId, int32_t order, const char* id) {
if (fillType == TSDB_FILL_NONE) {
return NULL;
}
@@ -446,10 +446,9 @@ struct SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTag
return NULL;
}
- pFillInfo->tsSlotId = primaryTsSlotId;
-
- taosResetFillInfo(pFillInfo, skey);
pFillInfo->order = order;
+ pFillInfo->tsSlotId = primaryTsSlotId;
+ taosResetFillInfo(pFillInfo, skey);
switch (fillType) {
case FILL_MODE_NONE:
@@ -535,6 +534,14 @@ void* taosDestroyFillInfo(SFillInfo* pFillInfo) {
return NULL;
}
+void taosFillSetDataOrderInfo(SFillInfo* pFillInfo, int32_t order) {
+ if (pFillInfo == NULL || (order != TSDB_ORDER_ASC && order != TSDB_ORDER_DESC)) {
+ return;
+ }
+
+ pFillInfo->order = order;
+}
+
void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) {
if (pFillInfo->type == TSDB_FILL_NONE) {
return;
@@ -581,7 +588,7 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma
int64_t numOfRes = -1;
if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
- TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
+ TSKEY lastKey = (TSDB_ORDER_ASC == pFillInfo->order ? tsList[pFillInfo->numOfRows - 1] : tsList[0]);
numOfRes = taosTimeCountInterval(lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision);
numOfRes += 1;
@@ -626,9 +633,9 @@ int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, SSDataBlock* p, int32_t ca
}
qDebug("fill:%p, generated fill result, src block:%d, index:%d, brange:%" PRId64 "-%" PRId64 ", currentKey:%" PRId64
- ", current : % d, total : % d, %s", pFillInfo,
- pFillInfo->numOfRows, pFillInfo->index, pFillInfo->start, pFillInfo->end, pFillInfo->currentKey,
- pFillInfo->numOfCurrent, pFillInfo->numOfTotal, pFillInfo->id);
+ ", current : % d, total : % d, %s",
+ pFillInfo, pFillInfo->numOfRows, pFillInfo->index, pFillInfo->start, pFillInfo->end, pFillInfo->currentKey,
+ pFillInfo->numOfCurrent, pFillInfo->numOfTotal, pFillInfo->id);
return numOfRes;
}
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index cb18eb9fff..fb1cb8dff5 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -362,7 +362,7 @@ static void setNotInterpoWindowKey(SqlFunctionCtx* pCtx, int32_t numOfOutput, in
static bool setTimeWindowInterpolationStartTs(SIntervalAggOperatorInfo* pInfo, int32_t pos, SSDataBlock* pBlock,
const TSKEY* tsCols, STimeWindow* win, SExprSupp* pSup) {
- bool ascQuery = (pInfo->order == TSDB_ORDER_ASC);
+ bool ascQuery = (pInfo->inputOrder == TSDB_ORDER_ASC);
TSKEY curTs = tsCols[pos];
@@ -392,7 +392,7 @@ static bool setTimeWindowInterpolationStartTs(SIntervalAggOperatorInfo* pInfo, i
static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo* pInfo, SExprSupp* pSup, int32_t endRowIndex,
SArray* pDataBlock, const TSKEY* tsCols, TSKEY blockEkey,
STimeWindow* win) {
- int32_t order = pInfo->order;
+ int32_t order = pInfo->inputOrder;
TSKEY actualEndKey = tsCols[endRowIndex];
TSKEY key = (order == TSDB_ORDER_ASC) ? win->ekey : win->skey;
@@ -550,7 +550,7 @@ static void doWindowBorderInterpolation(SIntervalAggOperatorInfo* pInfo, SSDataB
if (!done) {
int32_t endRowIndex = startPos + forwardRows - 1;
- TSKEY endKey = (pInfo->order == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey;
+ TSKEY endKey = (pInfo->inputOrder == TSDB_ORDER_ASC) ? pBlock->info.window.ekey : pBlock->info.window.skey;
bool interp = setTimeWindowInterpolationEndTs(pInfo, pSup, endRowIndex, pBlock->pDataBlock, tsCols, endKey, win);
if (interp) {
setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
@@ -639,7 +639,7 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
setNotInterpoWindowKey(pSup->pCtx, numOfExprs, RESULT_ROW_START_INTERP);
doApplyFunctions(pTaskInfo, pSup->pCtx, &w, &pInfo->twAggSup.timeWindowData, startPos, 0, tsCols, pBlock->info.rows,
- numOfExprs, pInfo->order);
+ numOfExprs, pInfo->inputOrder);
if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) {
closeResultRow(pr);
@@ -924,11 +924,11 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
int32_t numOfOutput = pSup->numOfExprs;
int64_t* tsCols = extractTsCol(pBlock, pInfo);
uint64_t tableGroupId = pBlock->info.groupId;
- bool ascScan = (pInfo->order == TSDB_ORDER_ASC);
+ bool ascScan = (pInfo->inputOrder == TSDB_ORDER_ASC);
TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols);
SResultRow* pResult = NULL;
- STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->order);
+ STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->inputOrder);
int32_t ret = TSDB_CODE_SUCCESS;
if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
@@ -946,7 +946,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
TSKEY ekey = ascScan ? win.ekey : win.skey;
int32_t forwardRows =
- getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order);
+ getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
ASSERT(forwardRows > 0);
// prev time window not interpolation yet.
@@ -969,7 +969,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &win, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols,
- pBlock->info.rows, numOfOutput, pInfo->order);
+ pBlock->info.rows, numOfOutput, pInfo->inputOrder);
}
doCloseWindow(pResultRowInfo, pInfo, pResult);
@@ -977,14 +977,14 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
STimeWindow nextWin = win;
while (1) {
int32_t prevEndPos = forwardRows - 1 + startPos;
- startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->order);
+ startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->inputOrder);
if (startPos < 0) {
break;
}
if (pInfo->ignoreExpiredData && isCloseWindow(&nextWin, &pInfo->twAggSup)) {
ekey = ascScan ? nextWin.ekey : nextWin.skey;
forwardRows =
- getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order);
+ getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
continue;
}
@@ -1002,14 +1002,14 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
ekey = ascScan ? nextWin.ekey : nextWin.skey;
forwardRows =
- getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->order);
+ getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
// window start(end) key interpolation
doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup);
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &nextWin, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols,
- pBlock->info.rows, numOfOutput, pInfo->order);
+ pBlock->info.rows, numOfOutput, pInfo->inputOrder);
doCloseWindow(pResultRowInfo, pInfo, pResult);
}
@@ -1082,7 +1082,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
break;
}
- getTableScanInfo(pOperator, &pInfo->order, &scanFlag);
+ getTableScanInfo(pOperator, &pInfo->inputOrder, &scanFlag);
if (pInfo->scalarSupp.pExprInfo != NULL) {
SExprSupp* pExprSup = &pInfo->scalarSupp;
@@ -1090,13 +1090,13 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
}
// the pDataBlock are always the same one, no need to call this again
- setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, scanFlag, true);
+ setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->inputOrder, scanFlag, true);
blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);
hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag, NULL);
}
- initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->order);
+ initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->resultTsOrder);
OPTR_SET_OPENED(pOperator);
pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
@@ -1218,7 +1218,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pBInfo->pRes, NULL);
- bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
+ bool hasRemain = hasRemainResults(&pInfo->groupResInfo);
if (!hasRemain) {
doSetOperatorCompleted(pOperator);
break;
@@ -1249,7 +1249,6 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
}
pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
-
pOperator->status = OP_RES_TO_RETURN;
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC);
@@ -1258,7 +1257,7 @@ static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pBInfo->pRes, NULL);
- bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
+ bool hasRemain = hasRemainResults(&pInfo->groupResInfo);
if (!hasRemain) {
doSetOperatorCompleted(pOperator);
break;
@@ -1295,7 +1294,7 @@ static SSDataBlock* doBuildIntervalResult(SOperatorInfo* pOperator) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pBlock, NULL);
- bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
+ bool hasRemain = hasRemainResults(&pInfo->groupResInfo);
if (!hasRemain) {
doSetOperatorCompleted(pOperator);
break;
@@ -1550,7 +1549,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SIntervalAggOperatorInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
- pInfo->order = TSDB_ORDER_ASC;
+ pInfo->inputOrder = TSDB_ORDER_ASC;
SExprSupp* pSup = &pOperator->exprSupp;
if (pOperator->status == OP_EXEC_DONE) {
@@ -1564,7 +1563,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
}
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
- if (pInfo->binfo.pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
+ if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainResults(&pInfo->groupResInfo)) {
pOperator->status = OP_EXEC_DONE;
qDebug("===stream===single interval is done");
freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
@@ -1597,7 +1596,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
continue;
}
- if (pBlock->info.type == STREAM_NORMAL) {
+ if (pBlock->info.type == STREAM_NORMAL && pBlock->info.version != 0) {
// set input version
pTaskInfo->version = pBlock->info.version;
}
@@ -1610,7 +1609,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
// The timewindow that overlaps the timestamps of the input pBlock need to be recalculated and return to the
// caller. Note that all the time window are not close till now.
// the pDataBlock are always the same one, no need to call this again
- setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, MAIN_SCAN, true);
+ setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->inputOrder, MAIN_SCAN, true);
if (pInfo->invertible) {
setInverFunction(pSup->pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.type);
}
@@ -1790,7 +1789,8 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
}
pInfo->win = pTaskInfo->window;
- pInfo->order = TSDB_ORDER_ASC;
+ pInfo->inputOrder = (pPhyNode->window.inputTsOrder == ORDER_ASC)? TSDB_ORDER_ASC:TSDB_ORDER_DESC;
+ pInfo->resultTsOrder = (pPhyNode->window.outputTsOrder == ORDER_ASC)? TSDB_ORDER_ASC:TSDB_ORDER_DESC;
pInfo->interval = *pInterval;
pInfo->execModel = pTaskInfo->execModel;
pInfo->twAggSup = *pTwAggSupp;
@@ -1807,7 +1807,6 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
}
pInfo->primaryTsIndex = primaryTsSlotId;
-
SExprSupp* pSup = &pOperator->exprSupp;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
@@ -1879,7 +1878,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr
goto _error;
}
- pInfo->order = TSDB_ORDER_ASC;
+ pInfo->inputOrder = TSDB_ORDER_ASC;
pInfo->interval = *pInterval;
pInfo->execModel = OPTR_EXEC_MODEL_STREAM;
pInfo->win = pTaskInfo->window;
@@ -2011,7 +2010,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pBInfo->pRes, NULL);
- bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
+ bool hasRemain = hasRemainResults(&pInfo->groupResInfo);
if (!hasRemain) {
doSetOperatorCompleted(pOperator);
break;
@@ -2054,7 +2053,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pBInfo->pRes, NULL);
- bool hasRemain = hasDataInGroupInfo(&pInfo->groupResInfo);
+ bool hasRemain = hasRemainResults(&pInfo->groupResInfo);
if (!hasRemain) {
doSetOperatorCompleted(pOperator);
break;
@@ -2220,7 +2219,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
// if (pOperator->status == OP_RES_TO_RETURN) {
// // doBuildResultDatablock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
- // if (pResBlock->info.rows == 0 || !hasDataInGroupInfo(&pSliceInfo->groupResInfo)) {
+ // if (pResBlock->info.rows == 0 || !hasRemainResults(&pSliceInfo->groupResInfo)) {
// doSetOperatorCompleted(pOperator);
// }
//
@@ -3098,6 +3097,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
pInfo->delIndex = 0;
pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes));
+ pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t));
pOperator->operatorType = pPhyNode->type;
pOperator->blocking = true;
@@ -3140,6 +3140,26 @@ void destroyStreamAggSupporter(SStreamAggSupporter* pSup) {
blockDataDestroy(pSup->pScanBlock);
}
+void destroyStateWinInfo(void* ptr) {
+ if (ptr == NULL) {
+ return;
+ }
+ SStateWindowInfo* pWin = (SStateWindowInfo*) ptr;
+ taosMemoryFreeClear(pWin->stateKey.pData);
+}
+
+void destroyStateStreamAggSupporter(SStreamAggSupporter* pSup) {
+ taosMemoryFreeClear(pSup->pKeyBuf);
+ void** pIte = NULL;
+ while ((pIte = taosHashIterate(pSup->pResultRows, pIte)) != NULL) {
+ SArray* pWins = (SArray*)(*pIte);
+ taosArrayDestroyEx(pWins, (FDelete)destroyStateWinInfo);
+ }
+ taosHashCleanup(pSup->pResultRows);
+ destroyDiskbasedBuf(pSup->pResultBuf);
+ blockDataDestroy(pSup->pScanBlock);
+}
+
void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) {
SStreamSessionAggOperatorInfo* pInfo = (SStreamSessionAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
@@ -3607,12 +3627,17 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
}
}
-void deleteWindow(SArray* pWinInfos, int32_t index) {
+void deleteWindow(SArray* pWinInfos, int32_t index, FDelete fp) {
ASSERT(index >= 0 && index < taosArrayGetSize(pWinInfos));
+ if (fp) {
+ void* ptr = taosArrayGet(pWinInfos, index);
+ fp(ptr);
+ }
taosArrayRemove(pWinInfos, index);
}
-static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, int64_t gap, SArray* result) {
+static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, int64_t gap,
+ SArray* result, FDelete fp) {
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* startDatas = (TSKEY*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
@@ -3626,7 +3651,7 @@ static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
if (!pCurWin) {
break;
}
- deleteWindow(pAggSup->pCurWins, winIndex);
+ deleteWindow(pAggSup->pCurWins, winIndex, fp);
if (result) {
taosArrayPush(result, pCurWin);
}
@@ -3751,7 +3776,7 @@ SResultWindowInfo* getResWinForSession(void* pData) { return (SResultWindowInfo*
SResultWindowInfo* getResWinForState(void* pData) { return &((SStateWindowInfo*)pData)->winInfo; }
int32_t closeSessionWindow(SHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SArray* pClosed, __get_win_info_ fn,
- bool delete) {
+ bool delete, FDelete fp) {
// Todo(liuyao) save window to tdb
void** pIte = NULL;
size_t keyLen = 0;
@@ -3773,7 +3798,7 @@ int32_t closeSessionWindow(SHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SArra
pSeWin->isOutput = true;
}
if (delete) {
- deleteWindow(pWins, i);
+ deleteWindow(pWins, i, fp);
i--;
size = taosArrayGetSize(pWins);
}
@@ -3786,13 +3811,13 @@ int32_t closeSessionWindow(SHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SArra
return TSDB_CODE_SUCCESS;
}
-static void closeChildSessionWindow(SArray* pChildren, TSKEY maxTs, bool delete) {
+static void closeChildSessionWindow(SArray* pChildren, TSKEY maxTs, bool delete, FDelete fp) {
int32_t size = taosArrayGetSize(pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChildOp = taosArrayGetP(pChildren, i);
SStreamSessionAggOperatorInfo* pChInfo = pChildOp->info;
pChInfo->twAggSup.maxTs = TMAX(pChInfo->twAggSup.maxTs, maxTs);
- closeSessionWindow(pChInfo->streamAggSup.pResultRows, &pChInfo->twAggSup, NULL, getResWinForSession, delete);
+ closeSessionWindow(pChInfo->streamAggSup.pResultRows, &pChInfo->twAggSup, NULL, getResWinForSession, delete, fp);
}
}
@@ -3835,7 +3860,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
return pInfo->pDelRes;
}
doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf);
- if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
+ if (pBInfo->pRes->info.rows == 0 || !hasRemainResults(&pInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
}
printDataBlock(pBInfo->pRes, IS_FINAL_OP(pInfo) ? "final session" : "single session");
@@ -3870,13 +3895,13 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
} else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
// gap must be 0
- doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins);
+ doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins, NULL);
if (IS_FINAL_OP(pInfo)) {
int32_t childIndex = getChildIndex(pBlock);
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info;
// gap must be 0
- doDeleteTimeWindows(&pChildInfo->streamAggSup, pBlock, 0, NULL);
+ doDeleteTimeWindows(&pChildInfo->streamAggSup, pBlock, 0, NULL, NULL);
rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator);
}
copyDeleteWindowInfo(pWins, pInfo->pStDeleted);
@@ -3918,8 +3943,8 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
pOperator->status = OP_RES_TO_RETURN;
closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated, getResWinForSession,
- pInfo->ignoreExpiredData);
- closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData);
+ pInfo->ignoreExpiredData, NULL);
+ closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData, NULL);
copyUpdateResult(pStUpdated, pUpdated);
taosHashCleanup(pStUpdated);
@@ -4014,7 +4039,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
break;
} else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
// gap must be 0
- doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, NULL);
+ doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, NULL, NULL);
copyDataBlock(pInfo->pDelRes, pBlock);
pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;
break;
@@ -4120,7 +4145,7 @@ _error:
void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) {
SStreamStateAggOperatorInfo* pInfo = (SStreamStateAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
- destroyStreamAggSupporter(&pInfo->streamAggSup);
+ destroyStateStreamAggSupporter(&pInfo->streamAggSup);
cleanupGroupResInfo(&pInfo->groupResInfo);
if (pInfo->pChildren != NULL) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
@@ -4132,6 +4157,10 @@ void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(pChInfo);
}
}
+ colDataDestroy(&pInfo->twAggSup.timeWindowData);
+ blockDataDestroy(pInfo->pDelRes);
+ taosHashCleanup(pInfo->pSeDeleted);
+ destroySqlFunctionCtx(pInfo->pDummyCtx, 0);
taosMemoryFreeClear(param);
}
@@ -4314,7 +4343,7 @@ static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
pSeDeleted);
ASSERT(isTsInWindow(pCurWin, tsCol[i]) || isEqualStateKey(pCurWin, pKeyData));
taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
- deleteWindow(pAggSup->pCurWins, winIndex);
+ deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo);
}
}
@@ -4357,7 +4386,7 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey,
&pSDataBlock->info.groupId);
taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
- deleteWindow(pAggSup->pCurWins, winIndex);
+ deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo);
continue;
}
code = doOneStateWindowAgg(pInfo, pSDataBlock, &pCurWin->winInfo, &pResult, i, winRows, numOfOutput, pOperator);
@@ -4391,7 +4420,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
return pInfo->pDelRes;
}
doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf);
- if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
+ if (pBInfo->pRes->info.rows == 0 || !hasRemainResults(&pInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
}
printDataBlock(pBInfo->pRes, "single state");
@@ -4415,7 +4444,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
continue;
} else if (pBlock->info.type == STREAM_DELETE_DATA) {
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
- doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins);
+ doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins, destroyStateWinInfo);
copyDeleteWindowInfo(pWins, pInfo->pSeDeleted);
taosArrayDestroy(pWins);
continue;
@@ -4437,8 +4466,8 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
pOperator->status = OP_RES_TO_RETURN;
closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated, getResWinForState,
- pInfo->ignoreExpiredData);
- closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData);
+ pInfo->ignoreExpiredData, destroyStateWinInfo);
+ // closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData, destroyStateWinInfo);
copyUpdateResult(pSeUpdated, pUpdated);
taosHashCleanup(pSeUpdated);
@@ -4564,7 +4593,7 @@ static int32_t outputMergeAlignedIntervalResult(SOperatorInfo* pOperatorInfo, ui
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
SExprSupp* pSup = &pOperatorInfo->exprSupp;
- bool ascScan = (iaInfo->order == TSDB_ORDER_ASC);
+ bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC);
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &wstartTs, TSDB_KEYSIZE, tableGroupId);
SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
@@ -4618,7 +4647,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
} else {
updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos,
- tsCols, pBlock->info.rows, numOfOutput, iaInfo->order);
+ tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder);
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs);
@@ -4637,7 +4666,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
}
updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &currWin, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos,
- tsCols, pBlock->info.rows, numOfOutput, iaInfo->order);
+ tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder);
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, currTs);
}
@@ -4682,8 +4711,8 @@ static SSDataBlock* doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
break;
}
- getTableScanInfo(pOperator, &iaInfo->order, &scanFlag);
- setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->order, scanFlag, true);
+ getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag);
+ setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->inputOrder, scanFlag, true);
doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);
doFilter(miaInfo->pCondition, pRes, NULL);
if (pRes->info.rows >= pOperator->resultInfo.capacity) {
@@ -4724,7 +4753,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
miaInfo->pCondition = pCondition;
iaInfo->win = pTaskInfo->window;
- iaInfo->order = TSDB_ORDER_ASC;
+ iaInfo->inputOrder = TSDB_ORDER_ASC;
iaInfo->interval = *pInterval;
iaInfo->execModel = pTaskInfo->execModel;
iaInfo->primaryTsIndex = primaryTsSlotId;
@@ -4806,7 +4835,7 @@ static int32_t finalizeWindowResult(SOperatorInfo* pOperatorInfo, uint64_t table
SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
- bool ascScan = (iaInfo->order == TSDB_ORDER_ASC);
+ bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC);
SExprSupp* pExprSup = &pOperatorInfo->exprSupp;
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &win->skey, TSDB_KEYSIZE, tableGroupId);
@@ -4824,7 +4853,7 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t
SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
- bool ascScan = (iaInfo->order == TSDB_ORDER_ASC);
+ bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC);
SExprSupp* pExprSup = &pOperatorInfo->exprSupp;
SGroupTimeWindow groupTimeWindow = {.groupId = tableGroupId, .window = *newWin};
@@ -4860,12 +4889,12 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
int32_t numOfOutput = pExprSup->numOfExprs;
int64_t* tsCols = extractTsCol(pBlock, iaInfo);
uint64_t tableGroupId = pBlock->info.groupId;
- bool ascScan = (iaInfo->order == TSDB_ORDER_ASC);
+ bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC);
TSKEY blockStartTs = getStartTsKey(&pBlock->info.window, tsCols);
SResultRow* pResult = NULL;
STimeWindow win =
- getActiveTimeWindow(iaInfo->aggSup.pResultBuf, pResultRowInfo, blockStartTs, &iaInfo->interval, iaInfo->order);
+ getActiveTimeWindow(iaInfo->aggSup.pResultBuf, pResultRowInfo, blockStartTs, &iaInfo->interval, iaInfo->inputOrder);
int32_t ret =
setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx,
@@ -4876,7 +4905,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
TSKEY ekey = ascScan ? win.ekey : win.skey;
int32_t forwardRows =
- getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->order);
+ getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->inputOrder);
ASSERT(forwardRows > 0);
// prev time window not interpolation yet.
@@ -4897,7 +4926,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &win, true);
doApplyFunctions(pTaskInfo, pExprSup->pCtx, &win, &iaInfo->twAggSup.timeWindowData, startPos, forwardRows, tsCols,
- pBlock->info.rows, numOfOutput, iaInfo->order);
+ pBlock->info.rows, numOfOutput, iaInfo->inputOrder);
doCloseWindow(pResultRowInfo, iaInfo, pResult);
// output previous interval results after this interval (&win) is closed
@@ -4906,7 +4935,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
STimeWindow nextWin = win;
while (1) {
int32_t prevEndPos = forwardRows - 1 + startPos;
- startPos = getNextQualifiedWindow(&iaInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, iaInfo->order);
+ startPos = getNextQualifiedWindow(&iaInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, iaInfo->inputOrder);
if (startPos < 0) {
break;
}
@@ -4921,14 +4950,14 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
ekey = ascScan ? nextWin.ekey : nextWin.skey;
forwardRows =
- getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->order);
+ getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, iaInfo->inputOrder);
// window start(end) key interpolation
doWindowBorderInterpolation(iaInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pExprSup);
updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &nextWin, true);
doApplyFunctions(pTaskInfo, pExprSup->pCtx, &nextWin, &iaInfo->twAggSup.timeWindowData, startPos, forwardRows,
- tsCols, pBlock->info.rows, numOfOutput, iaInfo->order);
+ tsCols, pBlock->info.rows, numOfOutput, iaInfo->inputOrder);
doCloseWindow(pResultRowInfo, iaInfo, pResult);
// output previous interval results after this interval (&nextWin) is closed
@@ -4982,8 +5011,8 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
break;
}
- getTableScanInfo(pOperator, &iaInfo->order, &scanFlag);
- setInputDataBlock(pOperator, pExpSupp->pCtx, pBlock, iaInfo->order, scanFlag, true);
+ getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag);
+ setInputDataBlock(pOperator, pExpSupp->pCtx, pBlock, iaInfo->inputOrder, scanFlag, true);
doMergeIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);
if (pRes->info.rows >= pOperator->resultInfo.threshold) {
@@ -5025,9 +5054,8 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
miaInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow));
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
-
iaInfo->win = pTaskInfo->window;
- iaInfo->order = TSDB_ORDER_ASC;
+ iaInfo->inputOrder = TSDB_ORDER_ASC;
iaInfo->interval = *pInterval;
iaInfo->execModel = pTaskInfo->execModel;
diff --git a/source/libs/function/inc/builtins.h b/source/libs/function/inc/builtins.h
index f5efcd5206..467fb11ae0 100644
--- a/source/libs/function/inc/builtins.h
+++ b/source/libs/function/inc/builtins.h
@@ -26,6 +26,7 @@ typedef int32_t (*FTranslateFunc)(SFunctionNode* pFunc, char* pErrBuf, int32_t l
typedef EFuncDataRequired (*FFuncDataRequired)(SFunctionNode* pFunc, STimeWindow* pTimeWindow);
typedef int32_t (*FCreateMergeFuncParameters)(SNodeList* pRawParameters, SNode* pPartialRes, SNodeList** pParameters);
typedef EFuncDataRequired (*FFuncDynDataRequired)(void* pRes, STimeWindow* pTimeWindow);
+typedef EFuncReturnRows (*FEstimateReturnRows)(SFunctionNode* pFunc);
typedef struct SBuiltinFuncDefinition {
const char* name;
@@ -44,6 +45,7 @@ typedef struct SBuiltinFuncDefinition {
const char* pPartialFunc;
const char* pMergeFunc;
FCreateMergeFuncParameters createMergeParaFuc;
+ FEstimateReturnRows estimateReturnRowsFunc;
} SBuiltinFuncDefinition;
extern const SBuiltinFuncDefinition funcMgtBuiltins[];
diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h
index 35669b3e42..0880f2f5c7 100644
--- a/source/libs/function/inc/builtinsimpl.h
+++ b/source/libs/function/inc/builtinsimpl.h
@@ -118,6 +118,7 @@ int32_t firstLastPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
int32_t firstCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
int32_t getFirstLastInfoSize(int32_t resBytes);
+EFuncDataRequired lastDynDataReq(void* pRes, STimeWindow* pTimeWindow);
int32_t lastRowFunction(SqlFunctionCtx *pCtx);
diff --git a/source/libs/function/inc/functionMgtInt.h b/source/libs/function/inc/functionMgtInt.h
index c79306f1e4..10cc20403c 100644
--- a/source/libs/function/inc/functionMgtInt.h
+++ b/source/libs/function/inc/functionMgtInt.h
@@ -48,6 +48,7 @@ extern "C" {
#define FUNC_MGT_CLIENT_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(19)
#define FUNC_MGT_MULTI_ROWS_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(20)
#define FUNC_MGT_KEEP_ORDER_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(21)
+#define FUNC_MGT_CUMULATIVE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(22)
#define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index 01a5e7997e..1db8264354 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -1277,6 +1277,8 @@ static int32_t translateCsum(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
return TSDB_CODE_SUCCESS;
}
+static EFuncReturnRows csumEstReturnRows(SFunctionNode* pFunc) { return FUNC_RETURN_ROWS_N; }
+
static int32_t translateMavg(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (2 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
@@ -1416,6 +1418,11 @@ static int32_t translateDerivative(SFunctionNode* pFunc, char* pErrBuf, int32_t
return TSDB_CODE_SUCCESS;
}
+static EFuncReturnRows derivativeEstReturnRows(SFunctionNode* pFunc) {
+ return 1 == ((SValueNode*)nodesListGetNode(pFunc->pParameterList, 2))->datum.i ? FUNC_RETURN_ROWS_INDEFINITE
+ : FUNC_RETURN_ROWS_N_MINUS_1;
+}
+
static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
@@ -1551,6 +1558,14 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
return TSDB_CODE_SUCCESS;
}
+static EFuncReturnRows diffEstReturnRows(SFunctionNode* pFunc) {
+ if (1 == LIST_LENGTH(pFunc->pParameterList)) {
+ return FUNC_RETURN_ROWS_N_MINUS_1;
+ }
+ return 1 == ((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i ? FUNC_RETURN_ROWS_INDEFINITE
+ : FUNC_RETURN_ROWS_N_MINUS_1;
+}
+
static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
@@ -2068,7 +2083,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "apercentile",
.type = FUNCTION_TYPE_APERCENTILE,
- .classification = FUNC_MGT_AGG_FUNC,
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateApercentile,
.getEnvFunc = getApercentileFuncEnv,
.initFunc = apercentileFunctionSetup,
@@ -2083,7 +2098,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_apercentile_partial",
- .type = FUNCTION_TYPE_APERCENTILE_PARTIAL,
+ .type = FUNCTION_TYPE_APERCENTILE_PARTIAL | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateApercentilePartial,
.getEnvFunc = getApercentileFuncEnv,
@@ -2096,7 +2111,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_apercentile_merge",
.type = FUNCTION_TYPE_APERCENTILE_MERGE,
- .classification = FUNC_MGT_AGG_FUNC,
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateApercentileMerge,
.getEnvFunc = getApercentileFuncEnv,
.initFunc = apercentileFunctionSetup,
@@ -2231,13 +2246,14 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "derivative",
.type = FUNCTION_TYPE_DERIVATIVE,
- .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
+ .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_CUMULATIVE_FUNC,
.translateFunc = translateDerivative,
.getEnvFunc = getDerivativeFuncEnv,
.initFunc = derivativeFuncSetup,
.processFunc = derivativeFunction,
.sprocessFunc = derivativeScalarFunction,
- .finalizeFunc = functionFinalize
+ .finalizeFunc = functionFinalize,
+ .estimateReturnRowsFunc = derivativeEstReturnRows
},
{
.name = "irate",
@@ -2312,6 +2328,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.type = FUNCTION_TYPE_LAST,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLast,
+ .dynDataRequiredFunc = lastDynDataReq,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,
.processFunc = lastFunction,
@@ -2358,7 +2375,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "histogram",
.type = FUNCTION_TYPE_HISTOGRAM,
- .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.translateFunc = translateHistogram,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = histogramFunctionSetup,
@@ -2373,7 +2390,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_histogram_partial",
.type = FUNCTION_TYPE_HISTOGRAM_PARTIAL,
- .classification = FUNC_MGT_AGG_FUNC,
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.translateFunc = translateHistogramPartial,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = histogramFunctionSetup,
@@ -2385,7 +2402,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_histogram_merge",
.type = FUNCTION_TYPE_HISTOGRAM_MERGE,
- .classification = FUNC_MGT_AGG_FUNC,
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.translateFunc = translateHistogramMerge,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = functionSetup,
@@ -2397,7 +2414,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "hyperloglog",
.type = FUNCTION_TYPE_HYPERLOGLOG,
- .classification = FUNC_MGT_AGG_FUNC,
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
.translateFunc = translateHLL,
.getEnvFunc = getHLLFuncEnv,
.initFunc = functionSetup,
@@ -2411,7 +2428,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_hyperloglog_partial",
- .type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL,
+ .type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHLLPartial,
.getEnvFunc = getHLLFuncEnv,
@@ -2423,7 +2440,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_hyperloglog_merge",
- .type = FUNCTION_TYPE_HYPERLOGLOG_MERGE,
+ .type = FUNCTION_TYPE_HYPERLOGLOG_MERGE | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateHLLMerge,
.getEnvFunc = getHLLFuncEnv,
@@ -2436,13 +2453,14 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "diff",
.type = FUNCTION_TYPE_DIFF,
- .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
+ .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC,
.translateFunc = translateDiff,
.getEnvFunc = getDiffFuncEnv,
.initFunc = diffFunctionSetup,
.processFunc = diffFunction,
.sprocessFunc = diffScalarFunction,
- .finalizeFunc = functionFinalize
+ .finalizeFunc = functionFinalize,
+ .estimateReturnRowsFunc = diffEstReturnRows
},
{
.name = "statecount",
@@ -2469,13 +2487,14 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "csum",
.type = FUNCTION_TYPE_CSUM,
- .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
+ .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC,
.translateFunc = translateCsum,
.getEnvFunc = getCsumFuncEnv,
.initFunc = functionSetup,
.processFunc = csumFunction,
.sprocessFunc = csumScalarFunction,
- .finalizeFunc = NULL
+ .finalizeFunc = NULL,
+ .estimateReturnRowsFunc = csumEstReturnRows,
},
{
.name = "mavg",
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 200df6bc80..0767c2e5a2 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -2700,6 +2700,22 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
return TSDB_CODE_SUCCESS;
}
+EFuncDataRequired lastDynDataReq(void* pRes, STimeWindow* pTimeWindow) {
+ SResultRowEntryInfo* pEntry = (SResultRowEntryInfo*) pRes;
+
+ // not initialized yet, data is required
+ if (pEntry == NULL) {
+ return FUNC_DATA_REQUIRED_DATA_LOAD;
+ }
+
+ SFirstLastRes* pResult = GET_ROWCELL_INTERBUF(pEntry);
+ if (pResult->hasResult && pResult->ts >= pTimeWindow->ekey) {
+ return FUNC_DATA_REQUIRED_NOT_LOAD;
+ } else {
+ return FUNC_DATA_REQUIRED_DATA_LOAD;
+ }
+}
+
int32_t getFirstLastInfoSize(int32_t resBytes) { return sizeof(SFirstLastRes) + resBytes; }
bool getFirstLastFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index 020fd648e1..5fc4e7882c 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -89,6 +89,14 @@ int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen) {
return TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION;
}
+EFuncReturnRows fmGetFuncReturnRows(SFunctionNode* pFunc) {
+ if (NULL != funcMgtBuiltins[pFunc->funcId].estimateReturnRowsFunc) {
+ return funcMgtBuiltins[pFunc->funcId].estimateReturnRowsFunc(pFunc);
+ }
+ return (fmIsIndefiniteRowsFunc(pFunc->funcId) || fmIsMultiRowsFunc(pFunc->funcId)) ? FUNC_RETURN_ROWS_INDEFINITE
+ : FUNC_RETURN_ROWS_NORMAL;
+}
+
bool fmIsBuiltinFunc(const char* pFunc) {
return NULL != taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
}
@@ -107,7 +115,12 @@ EFuncDataRequired fmFuncDynDataRequired(int32_t funcId, void* pRes, STimeWindow*
if (fmIsUserDefinedFunc(funcId) || funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return TSDB_CODE_FAILED;
}
- return funcMgtBuiltins[funcId].dynDataRequiredFunc(pRes, pTimeWindow);
+
+ if (funcMgtBuiltins[funcId].dynDataRequiredFunc == NULL) {
+ return FUNC_DATA_REQUIRED_DATA_LOAD;
+ } else {
+ return funcMgtBuiltins[funcId].dynDataRequiredFunc(pRes, pTimeWindow);
+ }
}
int32_t fmGetFuncExecFuncs(int32_t funcId, SFuncExecFuncs* pFpSet) {
@@ -192,6 +205,8 @@ bool fmIsMultiRowsFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, F
bool fmIsKeepOrderFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_KEEP_ORDER_FUNC); }
+bool fmIsCumulativeFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_CUMULATIVE_FUNC); }
+
bool fmIsInterpFunc(int32_t funcId) {
if (funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return false;
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index 5fc94c2642..79ef18eeb6 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -443,6 +443,7 @@ static int32_t logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* p
COPY_SCALAR_FIELD(igExpired);
COPY_SCALAR_FIELD(windowAlgo);
COPY_SCALAR_FIELD(inputTsOrder);
+ COPY_SCALAR_FIELD(outputTsOrder);
return TSDB_CODE_SUCCESS;
}
@@ -452,6 +453,7 @@ static int32_t logicFillCopy(const SFillLogicNode* pSrc, SFillLogicNode* pDst) {
CLONE_NODE_FIELD(pWStartTs);
CLONE_NODE_FIELD(pValues);
COPY_OBJECT_FIELD(timeRange, sizeof(STimeWindow));
+ COPY_SCALAR_FIELD(inputTsOrder);
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index af3f0c242b..b0c16f26ed 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -1936,6 +1936,8 @@ static const char* jkWindowPhysiPlanTsEnd = "TsEnd";
static const char* jkWindowPhysiPlanTriggerType = "TriggerType";
static const char* jkWindowPhysiPlanWatermark = "Watermark";
static const char* jkWindowPhysiPlanIgnoreExpired = "IgnoreExpired";
+static const char* jkWindowPhysiPlanInputTsOrder = "inputTsOrder";
+static const char* jkWindowPhysiPlanOutputTsOrder = "outputTsOrder";
static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
const SWinodwPhysiNode* pNode = (const SWinodwPhysiNode*)pObj;
@@ -1962,6 +1964,12 @@ static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanIgnoreExpired, pNode->igExpired);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanInputTsOrder, pNode->inputTsOrder);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkWindowPhysiPlanOutputTsOrder, pNode->outputTsOrder);
+ }
return code;
}
@@ -1991,6 +1999,12 @@ static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetTinyIntValue(pJson, jkWindowPhysiPlanIgnoreExpired, &pNode->igExpired);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ tjsonGetNumberValue(pJson, jkWindowPhysiPlanInputTsOrder, pNode->inputTsOrder, code);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ tjsonGetNumberValue(pJson, jkWindowPhysiPlanOutputTsOrder, pNode->outputTsOrder, code);
+ }
return code;
}
@@ -2053,6 +2067,7 @@ static const char* jkFillPhysiPlanValues = "Values";
static const char* jkFillPhysiPlanTargets = "Targets";
static const char* jkFillPhysiPlanStartTime = "StartTime";
static const char* jkFillPhysiPlanEndTime = "EndTime";
+static const char* jkFillPhysiPlanInputTsOrder = "inputTsOrder";
static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) {
const SFillPhysiNode* pNode = (const SFillPhysiNode*)pObj;
@@ -2076,6 +2091,9 @@ static int32_t physiFillNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanEndTime, pNode->timeRange.ekey);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkFillPhysiPlanInputTsOrder, pNode->inputTsOrder);
+ }
return code;
}
@@ -2103,6 +2121,9 @@ static int32_t jsonToPhysiFillNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBigIntValue(pJson, jkFillPhysiPlanEndTime, &pNode->timeRange.ekey);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ tjsonGetNumberValue(pJson, jkFillPhysiPlanInputTsOrder, pNode->inputTsOrder, code);
+ }
return code;
}
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 8e1243b558..9e8b28f362 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -1112,12 +1112,16 @@ static int32_t translateIndefiniteRowsFunc(STranslateContext* pCxt, SFunctionNod
if (!fmIsIndefiniteRowsFunc(pFunc->funcId)) {
return TSDB_CODE_SUCCESS;
}
- if (!isSelectStmt(pCxt->pCurrStmt) || SQL_CLAUSE_SELECT != pCxt->currClause ||
- ((SSelectStmt*)pCxt->pCurrStmt)->hasIndefiniteRowsFunc || ((SSelectStmt*)pCxt->pCurrStmt)->hasAggFuncs ||
- ((SSelectStmt*)pCxt->pCurrStmt)->hasMultiRowsFunc) {
+ if (!isSelectStmt(pCxt->pCurrStmt) || SQL_CLAUSE_SELECT != pCxt->currClause) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
}
- if (NULL != ((SSelectStmt*)pCxt->pCurrStmt)->pWindow || NULL != ((SSelectStmt*)pCxt->pCurrStmt)->pGroupByList) {
+ SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
+ if (pSelect->hasAggFuncs || pSelect->hasMultiRowsFunc ||
+ (pSelect->hasIndefiniteRowsFunc &&
+ (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc)))) {
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
+ }
+ if (NULL != pSelect->pWindow || NULL != pSelect->pGroupByList) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
"%s function is not supported in window query or group query", pFunc->functionName);
}
@@ -1232,18 +1236,28 @@ static int32_t getMultiResFuncNum(SNodeList* pParameterList) {
return LIST_LENGTH(pParameterList);
}
+static int32_t calcSelectFuncNum(SFunctionNode* pFunc, int32_t currSelectFuncNum) {
+ if (fmIsCumulativeFunc(pFunc->funcId)) {
+ return currSelectFuncNum > 0 ? currSelectFuncNum : 1;
+ }
+ return currSelectFuncNum + ((fmIsMultiResFunc(pFunc->funcId) && !fmIsLastRowFunc(pFunc->funcId))
+ ? getMultiResFuncNum(pFunc->pParameterList)
+ : 1);
+}
+
static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
if (NULL != pCurrStmt && QUERY_NODE_SELECT_STMT == nodeType(pCurrStmt)) {
SSelectStmt* pSelect = (SSelectStmt*)pCurrStmt;
pSelect->hasAggFuncs = pSelect->hasAggFuncs ? true : fmIsAggFunc(pFunc->funcId);
pSelect->hasRepeatScanFuncs = pSelect->hasRepeatScanFuncs ? true : fmIsRepeatScanFunc(pFunc->funcId);
- pSelect->hasIndefiniteRowsFunc = pSelect->hasIndefiniteRowsFunc ? true : fmIsIndefiniteRowsFunc(pFunc->funcId);
+ if (fmIsIndefiniteRowsFunc(pFunc->funcId)) {
+ pSelect->hasIndefiniteRowsFunc = true;
+ pSelect->returnRows = fmGetFuncReturnRows(pFunc);
+ }
pSelect->hasMultiRowsFunc = pSelect->hasMultiRowsFunc ? true : fmIsMultiRowsFunc(pFunc->funcId);
if (fmIsSelectFunc(pFunc->funcId)) {
pSelect->hasSelectFunc = true;
- pSelect->selectFuncNum += (fmIsMultiResFunc(pFunc->funcId) && !fmIsLastRowFunc(pFunc->funcId))
- ? getMultiResFuncNum(pFunc->pParameterList)
- : 1;
+ pSelect->selectFuncNum = calcSelectFuncNum(pFunc, pSelect->selectFuncNum);
} else if (fmIsVectorFunc(pFunc->funcId)) {
pSelect->hasOtherVectorFunc = true;
}
@@ -2483,6 +2497,9 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
}
static int32_t translatePartitionBy(STranslateContext* pCxt, SNodeList* pPartitionByList) {
+ if (NULL == pPartitionByList) {
+ return TSDB_CODE_SUCCESS;
+ }
pCxt->currClause = SQL_CLAUSE_PARTITION_BY;
return translateExprList(pCxt, pPartitionByList);
}
@@ -5571,7 +5588,7 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
int32_t code = checkCreateTable(pCxt, pStmt, false);
SVgroupInfo info = {0};
- SName name;
+ SName name;
toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &name);
if (TSDB_CODE_SUCCESS == code) {
code = getTableHashVgroupImpl(pCxt, &name, &info);
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index a80509c165..b51624336b 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -632,6 +632,7 @@ static int32_t createWindowLogicNodeFinalize(SLogicPlanContext* pCxt, SSelectStm
pWindow->igExpired = pCxt->pPlanCxt->igExpired;
}
pWindow->inputTsOrder = ORDER_ASC;
+ pWindow->outputTsOrder = ORDER_ASC;
int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_WINDOW, fmIsWindowClauseFunc, &pWindow->pFuncs);
if (TSDB_CODE_SUCCESS == code) {
@@ -764,6 +765,7 @@ static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pFill->node.groupAction = GROUP_ACTION_KEEP;
pFill->node.requireDataOrder = DATA_ORDER_LEVEL_IN_GROUP;
pFill->node.resultDataOrder = DATA_ORDER_LEVEL_IN_GROUP;
+ pFill->inputTsOrder = ORDER_ASC;
int32_t code = nodesCollectColumns(pSelect, SQL_CLAUSE_WINDOW, NULL, COLLECT_COL_TYPE_ALL, &pFill->node.pTargets);
if (TSDB_CODE_SUCCESS == code && NULL == pFill->node.pTargets) {
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 98b0ce2007..577266a697 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -38,10 +38,13 @@ typedef struct SOptimizeRule {
FOptimize optimizeFunc;
} SOptimizeRule;
+typedef enum EScanOrder { SCAN_ORDER_ASC = 1, SCAN_ORDER_DESC, SCAN_ORDER_BOTH } EScanOrder;
+
typedef struct SOsdInfo {
SScanLogicNode* pScan;
SNodeList* pSdrFuncs;
SNodeList* pDsoFuncs;
+ EScanOrder scanOrder;
} SOsdInfo;
typedef struct SCpdIsMultiTableCondCxt {
@@ -97,6 +100,27 @@ static EDealRes optRebuildTbanme(SNode** pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
+static void optSetParentOrder(SLogicNode* pNode, EOrder order) {
+ if (NULL == pNode) {
+ return;
+ }
+ switch (nodeType(pNode)) {
+ case QUERY_NODE_LOGIC_PLAN_WINDOW:
+ ((SWindowLogicNode*)pNode)->inputTsOrder = order;
+ // window has a sorting function, and the operator behind it uses its output order
+ return;
+ case QUERY_NODE_LOGIC_PLAN_JOIN:
+ ((SJoinLogicNode*)pNode)->inputTsOrder = order;
+ break;
+ case QUERY_NODE_LOGIC_PLAN_FILL:
+ ((SFillLogicNode*)pNode)->inputTsOrder = order;
+ break;
+ default:
+ break;
+ }
+ optSetParentOrder(pNode->pParent, order);
+}
+
EDealRes scanPathOptHaveNormalColImpl(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
// *((bool*)pContext) = (COLUMN_TYPE_TAG != ((SColumnNode*)pNode)->colType);
@@ -179,16 +203,18 @@ static int32_t scanPathOptGetRelatedFuncs(SScanLogicNode* pScan, SNodeList** pSd
SNodeList* pAllFuncs = scanPathOptGetAllFuncs(pScan->node.pParent);
SNodeList* pTmpSdrFuncs = NULL;
SNodeList* pTmpDsoFuncs = NULL;
- SNode* pFunc = NULL;
+ SNode* pNode = NULL;
bool otherFunc = false;
- FOREACH(pFunc, pAllFuncs) {
- int32_t code = TSDB_CODE_SUCCESS;
- if (scanPathOptNeedOptimizeDataRequire((SFunctionNode*)pFunc)) {
- code = nodesListMakeStrictAppend(&pTmpSdrFuncs, nodesCloneNode(pFunc));
- } else if (scanPathOptNeedDynOptimize((SFunctionNode*)pFunc)) {
- code = nodesListMakeStrictAppend(&pTmpDsoFuncs, nodesCloneNode(pFunc));
+ FOREACH(pNode, pAllFuncs) {
+ SFunctionNode* pFunc = (SFunctionNode*)pNode;
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (scanPathOptNeedOptimizeDataRequire(pFunc)) {
+ code = nodesListMakeStrictAppend(&pTmpSdrFuncs, nodesCloneNode(pNode));
+ } else if (scanPathOptNeedDynOptimize(pFunc)) {
+ code = nodesListMakeStrictAppend(&pTmpDsoFuncs, nodesCloneNode(pNode));
} else {
otherFunc = true;
+ break;
}
if (TSDB_CODE_SUCCESS != code) {
nodesDestroyList(pTmpSdrFuncs);
@@ -206,12 +232,46 @@ static int32_t scanPathOptGetRelatedFuncs(SScanLogicNode* pScan, SNodeList** pSd
return TSDB_CODE_SUCCESS;
}
+static int32_t scanPathOptGetScanOrder(SScanLogicNode* pScan, EScanOrder* pScanOrder) {
+ SNodeList* pAllFuncs = scanPathOptGetAllFuncs(pScan->node.pParent);
+ SNode* pNode = NULL;
+ bool hasFirst = false;
+ bool hasLast = false;
+ bool otherFunc = false;
+ FOREACH(pNode, pAllFuncs) {
+ SFunctionNode* pFunc = (SFunctionNode*)pNode;
+ if (FUNCTION_TYPE_FIRST == pFunc->funcType) {
+ hasFirst = true;
+ } else if (FUNCTION_TYPE_LAST == pFunc->funcType) {
+ hasLast = true;
+ } else if (FUNCTION_TYPE_SELECT_VALUE != pFunc->funcType) {
+ otherFunc = true;
+ }
+ }
+ if (hasFirst && hasLast && !otherFunc) {
+ *pScanOrder = SCAN_ORDER_BOTH;
+ } else if (hasLast) {
+ *pScanOrder = SCAN_ORDER_DESC;
+ } else {
+ *pScanOrder = SCAN_ORDER_ASC;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t scanPathOptSetOsdInfo(SOsdInfo* pInfo) {
+ int32_t code = scanPathOptGetRelatedFuncs(pInfo->pScan, &pInfo->pSdrFuncs, &pInfo->pDsoFuncs);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = scanPathOptGetScanOrder(pInfo->pScan, &pInfo->scanOrder);
+ }
+ return code;
+}
+
static int32_t scanPathOptMatch(SOptimizeContext* pCxt, SLogicNode* pLogicNode, SOsdInfo* pInfo) {
pInfo->pScan = (SScanLogicNode*)optFindPossibleNode(pLogicNode, scanPathOptMayBeOptimized);
if (NULL == pInfo->pScan) {
return TSDB_CODE_SUCCESS;
}
- return scanPathOptGetRelatedFuncs(pInfo->pScan, &pInfo->pSdrFuncs, &pInfo->pDsoFuncs);
+ return scanPathOptSetOsdInfo(pInfo);
}
static EFuncDataRequired scanPathOptPromoteDataRequired(EFuncDataRequired l, EFuncDataRequired r) {
@@ -258,15 +318,42 @@ static void scanPathOptSetScanWin(SScanLogicNode* pScan) {
}
}
+static void scanPathOptSetScanOrder(EScanOrder scanOrder, SScanLogicNode* pScan) {
+ if (pScan->sortPrimaryKey || pScan->scanSeq[0] > 1 || pScan->scanSeq[1] > 1) {
+ return;
+ }
+ switch (scanOrder) {
+ case SCAN_ORDER_ASC:
+ pScan->scanSeq[0] = 1;
+ pScan->scanSeq[1] = 0;
+ optSetParentOrder(pScan->node.pParent, ORDER_ASC);
+ break;
+ case SCAN_ORDER_DESC:
+ pScan->scanSeq[0] = 0;
+ pScan->scanSeq[1] = 1;
+ optSetParentOrder(pScan->node.pParent, ORDER_DESC);
+ break;
+ case SCAN_ORDER_BOTH:
+ pScan->scanSeq[0] = 1;
+ pScan->scanSeq[1] = 1;
+ break;
+ default:
+ break;
+ }
+}
+
static int32_t scanPathOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) {
- SOsdInfo info = {0};
+ SOsdInfo info = {.scanOrder = SCAN_ORDER_ASC};
int32_t code = scanPathOptMatch(pCxt, pLogicSubplan->pNode, &info);
if (TSDB_CODE_SUCCESS == code && info.pScan) {
- scanPathOptSetScanWin((SScanLogicNode*)info.pScan);
+ scanPathOptSetScanWin(info.pScan);
+ scanPathOptSetScanOrder(info.scanOrder, info.pScan);
}
if (TSDB_CODE_SUCCESS == code && (NULL != info.pDsoFuncs || NULL != info.pSdrFuncs)) {
info.pScan->dataRequired = scanPathOptGetDataRequired(info.pSdrFuncs);
info.pScan->pDynamicScanFuncs = info.pDsoFuncs;
+ }
+ if (TSDB_CODE_SUCCESS == code && info.pScan) {
OPTIMIZE_FLAG_SET_MASK(info.pScan->node.optimizedFlag, OPTIMIZE_FLAG_SCAN_PATH);
pCxt->optimized = true;
}
@@ -987,12 +1074,13 @@ static bool sortPriKeyOptMayBeOptimized(SLogicNode* pNode) {
}
SSortLogicNode* pSort = (SSortLogicNode*)pNode;
if (pSort->groupSort || !sortPriKeyOptIsPriKeyOrderBy(pSort->pSortKeys) || 1 != LIST_LENGTH(pSort->node.pChildren)) {
- return TSDB_CODE_SUCCESS;
+ return false;
}
return true;
}
-static int32_t sortPriKeyOptGetScanNodesImpl(SLogicNode* pNode, bool* pNotOptimize, SNodeList** pScanNodes) {
+static int32_t sortPriKeyOptGetSequencingNodesImpl(SLogicNode* pNode, bool* pNotOptimize,
+ SNodeList** pSequencingNodes) {
switch (nodeType(pNode)) {
case QUERY_NODE_LOGIC_PLAN_SCAN: {
SScanLogicNode* pScan = (SScanLogicNode*)pNode;
@@ -1000,17 +1088,19 @@ static int32_t sortPriKeyOptGetScanNodesImpl(SLogicNode* pNode, bool* pNotOptimi
*pNotOptimize = true;
return TSDB_CODE_SUCCESS;
}
- return nodesListMakeAppend(pScanNodes, (SNode*)pNode);
+ return nodesListMakeAppend(pSequencingNodes, (SNode*)pNode);
}
case QUERY_NODE_LOGIC_PLAN_JOIN: {
- int32_t code =
- sortPriKeyOptGetScanNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), pNotOptimize, pScanNodes);
+ int32_t code = sortPriKeyOptGetSequencingNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 0),
+ pNotOptimize, pSequencingNodes);
if (TSDB_CODE_SUCCESS == code) {
- code =
- sortPriKeyOptGetScanNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 1), pNotOptimize, pScanNodes);
+ code = sortPriKeyOptGetSequencingNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 1), pNotOptimize,
+ pSequencingNodes);
}
return code;
}
+ case QUERY_NODE_LOGIC_PLAN_WINDOW:
+ return nodesListMakeAppend(pSequencingNodes, (SNode*)pNode);
case QUERY_NODE_LOGIC_PLAN_AGG:
case QUERY_NODE_LOGIC_PLAN_PARTITION:
*pNotOptimize = true;
@@ -1024,14 +1114,15 @@ static int32_t sortPriKeyOptGetScanNodesImpl(SLogicNode* pNode, bool* pNotOptimi
return TSDB_CODE_SUCCESS;
}
- return sortPriKeyOptGetScanNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), pNotOptimize, pScanNodes);
+ return sortPriKeyOptGetSequencingNodesImpl((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), pNotOptimize,
+ pSequencingNodes);
}
-static int32_t sortPriKeyOptGetScanNodes(SLogicNode* pNode, SNodeList** pScanNodes) {
+static int32_t sortPriKeyOptGetSequencingNodes(SLogicNode* pNode, SNodeList** pSequencingNodes) {
bool notOptimize = false;
- int32_t code = sortPriKeyOptGetScanNodesImpl(pNode, ¬Optimize, pScanNodes);
+ int32_t code = sortPriKeyOptGetSequencingNodesImpl(pNode, ¬Optimize, pSequencingNodes);
if (TSDB_CODE_SUCCESS != code || notOptimize) {
- nodesClearList(*pScanNodes);
+ nodesClearList(*pSequencingNodes);
}
return code;
}
@@ -1040,33 +1131,26 @@ static EOrder sortPriKeyOptGetPriKeyOrder(SSortLogicNode* pSort) {
return ((SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0))->order;
}
-static void sortPriKeyOptSetParentOrder(SLogicNode* pNode, EOrder order) {
- if (NULL == pNode) {
- return;
- }
- if (QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode)) {
- ((SWindowLogicNode*)pNode)->inputTsOrder = order;
- } else if (QUERY_NODE_LOGIC_PLAN_JOIN == nodeType(pNode)) {
- ((SJoinLogicNode*)pNode)->inputTsOrder = order;
- }
- sortPriKeyOptSetParentOrder(pNode->pParent, order);
-}
-
static int32_t sortPriKeyOptApply(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SSortLogicNode* pSort,
- SNodeList* pScanNodes) {
+ SNodeList* pSequencingNodes) {
EOrder order = sortPriKeyOptGetPriKeyOrder(pSort);
- SNode* pScanNode = NULL;
- FOREACH(pScanNode, pScanNodes) {
- SScanLogicNode* pScan = (SScanLogicNode*)pScanNode;
- if (ORDER_DESC == order && pScan->scanSeq[0] > 0) {
- TSWAP(pScan->scanSeq[0], pScan->scanSeq[1]);
+ SNode* pSequencingNode = NULL;
+ FOREACH(pSequencingNode, pSequencingNodes) {
+ if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pSequencingNode)) {
+ SScanLogicNode* pScan = (SScanLogicNode*)pSequencingNode;
+ if ((ORDER_DESC == order && pScan->scanSeq[0] > 0) || (ORDER_ASC == order && pScan->scanSeq[1] > 0)) {
+ TSWAP(pScan->scanSeq[0], pScan->scanSeq[1]);
+ }
+ if (TSDB_SUPER_TABLE == pScan->tableType) {
+ pScan->scanType = SCAN_TYPE_TABLE_MERGE;
+ pScan->node.resultDataOrder = DATA_ORDER_LEVEL_GLOBAL;
+ pScan->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL;
+ }
+ pScan->sortPrimaryKey = true;
+ } else if (QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pSequencingNode)) {
+ ((SWindowLogicNode*)pSequencingNode)->outputTsOrder = order;
}
- if (TSDB_SUPER_TABLE == pScan->tableType) {
- pScan->scanType = SCAN_TYPE_TABLE_MERGE;
- pScan->node.resultDataOrder = DATA_ORDER_LEVEL_GLOBAL;
- pScan->node.requireDataOrder = DATA_ORDER_LEVEL_GLOBAL;
- }
- sortPriKeyOptSetParentOrder(pScan->node.pParent, order);
+ optSetParentOrder(((SLogicNode*)pSequencingNode)->pParent, order);
}
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pSort->node.pChildren, 0);
@@ -1083,12 +1167,13 @@ static int32_t sortPriKeyOptApply(SOptimizeContext* pCxt, SLogicSubplan* pLogicS
}
static int32_t sortPrimaryKeyOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SSortLogicNode* pSort) {
- SNodeList* pScanNodes = NULL;
- int32_t code = sortPriKeyOptGetScanNodes((SLogicNode*)nodesListGetNode(pSort->node.pChildren, 0), &pScanNodes);
- if (TSDB_CODE_SUCCESS == code && NULL != pScanNodes) {
- code = sortPriKeyOptApply(pCxt, pLogicSubplan, pSort, pScanNodes);
+ SNodeList* pSequencingNodes = NULL;
+ int32_t code =
+ sortPriKeyOptGetSequencingNodes((SLogicNode*)nodesListGetNode(pSort->node.pChildren, 0), &pSequencingNodes);
+ if (TSDB_CODE_SUCCESS == code && NULL != pSequencingNodes) {
+ code = sortPriKeyOptApply(pCxt, pLogicSubplan, pSort, pSequencingNodes);
}
- nodesClearList(pScanNodes);
+ nodesClearList(pSequencingNodes);
return code;
}
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index 2e5c4255e6..3771586b34 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -1089,6 +1089,8 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList*
pWindow->triggerType = pWindowLogicNode->triggerType;
pWindow->watermark = pWindowLogicNode->watermark;
pWindow->igExpired = pWindowLogicNode->igExpired;
+ pWindow->inputTsOrder = pWindowLogicNode->inputTsOrder;
+ pWindow->outputTsOrder = pWindowLogicNode->outputTsOrder;
SNodeList* pPrecalcExprs = NULL;
SNodeList* pFuncs = NULL;
@@ -1363,6 +1365,7 @@ static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
pFill->mode = pFillNode->mode;
pFill->timeRange = pFillNode->timeRange;
+ pFill->inputTsOrder = pFillNode->inputTsOrder;
SDataBlockDescNode* pChildTupe = (((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc);
int32_t code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pFillNode->node.pTargets, &pFill->pTargets);
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index 81e2bff179..bc5e50218b 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -492,7 +492,7 @@ static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo
((SWindowLogicNode*)pInfo->pSplitNode)->windowAlgo = INTERVAL_ALGO_MERGE;
SNodeList* pMergeKeys = NULL;
code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pInfo->pSplitNode)->pTspk,
- ((SWindowLogicNode*)pInfo->pSplitNode)->inputTsOrder, &pMergeKeys);
+ ((SWindowLogicNode*)pInfo->pSplitNode)->outputTsOrder, &pMergeKeys);
if (TSDB_CODE_SUCCESS == code) {
code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, pMergeKeys, pPartWindow, true);
}
diff --git a/source/libs/planner/test/planBasicTest.cpp b/source/libs/planner/test/planBasicTest.cpp
index 9cfae68d34..d7c947a20d 100644
--- a/source/libs/planner/test/planBasicTest.cpp
+++ b/source/libs/planner/test/planBasicTest.cpp
@@ -175,6 +175,16 @@ TEST_F(PlanBasicTest, pseudoColumn) {
"WHERE ts BETWEEN '2017-7-14 18:00:00' AND '2017-7-14 19:00:00' INTERVAL(10S)");
}
+TEST_F(PlanBasicTest, indefiniteRowsFunc) {
+ useDb("root", "test");
+
+ run("SELECT DIFF(c1) FROM t1");
+
+ run("SELECT DIFF(c1), c2 FROM t1");
+
+ run("SELECT DIFF(c1), DIFF(c3), ts FROM t1");
+}
+
TEST_F(PlanBasicTest, withoutFrom) {
useDb("root", "test");
diff --git a/source/libs/planner/test/planOptimizeTest.cpp b/source/libs/planner/test/planOptimizeTest.cpp
index 058705403b..6c5b760564 100644
--- a/source/libs/planner/test/planOptimizeTest.cpp
+++ b/source/libs/planner/test/planOptimizeTest.cpp
@@ -30,6 +30,11 @@ TEST_F(PlanOptimizeTest, scanPath) {
run("SELECT COUNT(CAST(c1 AS BIGINT)) FROM t1");
run("SELECT PERCENTILE(c1, 40), COUNT(*) FROM t1");
+
+ run("SELECT LAST(c1) FROM t1");
+
+ run("SELECT LAST(c1) FROM t1 WHERE ts BETWEEN '2022-7-29 11:10:10' AND '2022-7-30 11:10:10' INTERVAL(10S) "
+ "FILL(LINEAR)");
}
TEST_F(PlanOptimizeTest, pushDownCondition) {
@@ -57,7 +62,15 @@ TEST_F(PlanOptimizeTest, sortPrimaryKey) {
run("SELECT c1 FROM t1 ORDER BY ts DESC");
+ run("SELECT c1 FROM st1 ORDER BY ts DESC");
+
run("SELECT COUNT(*) FROM t1 INTERVAL(10S) ORDER BY _WSTART DESC");
+
+ run("SELECT FIRST(c1) FROM t1 WHERE ts BETWEEN '2022-7-29 11:10:10' AND '2022-7-30 11:10:10' INTERVAL(10S) "
+ "FILL(LINEAR) ORDER BY _WSTART DESC");
+
+ run("SELECT LAST(c1) FROM t1 WHERE ts BETWEEN '2022-7-29 11:10:10' AND '2022-7-30 11:10:10' INTERVAL(10S) "
+ "FILL(LINEAR) ORDER BY _WSTART");
}
TEST_F(PlanOptimizeTest, PartitionTags) {
diff --git a/source/libs/qworker/inc/qwMsg.h b/source/libs/qworker/inc/qwMsg.h
index 5378934343..3ee870ef96 100644
--- a/source/libs/qworker/inc/qwMsg.h
+++ b/source/libs/qworker/inc/qwMsg.h
@@ -39,7 +39,7 @@ int32_t qwBuildAndSendFetchRsp(int32_t rspType, SRpcHandleInfo *pConn, SRetrieve
void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComplete);
int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code, SQWTaskCtx *ctx);
-int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num);
+int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SArray* pExecList);
int32_t qwBuildAndSendErrorRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code);
void qwFreeFetchRsp(void *msg);
int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp);
diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c
index 7901fa64cb..e8ffd98153 100644
--- a/source/libs/qworker/src/qwMsg.c
+++ b/source/libs/qworker/src/qwMsg.c
@@ -82,8 +82,9 @@ int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t c
return TSDB_CODE_SUCCESS;
}
-int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num) {
- SExplainRsp rsp = {.numOfPlans = num, .subplanInfo = execInfo};
+int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SArray* pExecList) {
+ SExplainExecInfo* pInfo = taosArrayGet(pExecList, 0);
+ SExplainRsp rsp = {.numOfPlans = taosArrayGetSize(pExecList), .subplanInfo = pInfo};
int32_t contLen = tSerializeSExplainRsp(NULL, 0, &rsp);
void * pRsp = rpcMallocCont(contLen);
@@ -96,10 +97,9 @@ int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execIn
.code = 0,
.info = *pConn,
};
+
rpcRsp.info.ahandle = NULL;
-
tmsgSendRsp(&rpcRsp);
-
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index d1f8a50dab..36d85f1f12 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -44,18 +44,24 @@ int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *re
QW_RET(TSDB_CODE_SUCCESS);
}
+static void freeItem(void* param) {
+ SExplainExecInfo* pInfo = param;
+ taosMemoryFree(pInfo->verboseInfo);
+}
+
int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
qTaskInfo_t taskHandle = ctx->taskHandle;
if (TASK_TYPE_TEMP == ctx->taskType && taskHandle) {
if (ctx->explain) {
- SExplainExecInfo *execInfo = NULL;
- int32_t resNum = 0;
- QW_ERR_RET(qGetExplainExecInfo(taskHandle, &resNum, &execInfo));
+ SArray* execInfoList = taosArrayInit(4, sizeof(SExplainExecInfo));
+ QW_ERR_RET(qGetExplainExecInfo(taskHandle, execInfoList));
SRpcHandleInfo connInfo = ctx->ctrlConnInfo;
connInfo.ahandle = NULL;
- QW_ERR_RET(qwBuildAndSendExplainRsp(&connInfo, execInfo, resNum));
+ int32_t code = qwBuildAndSendExplainRsp(&connInfo, execInfoList);
+ taosArrayDestroyEx(execInfoList, freeItem);
+ QW_ERR_RET(code);
}
if (!ctx->needFetch) {
diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c
index 7d0f53640c..0f63510b12 100644
--- a/source/libs/sync/src/syncAppendEntries.c
+++ b/source/libs/sync/src/syncAppendEntries.c
@@ -790,65 +790,6 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
}
} while (0);
-#if 0
- // fake match
- //
- // condition1:
- // I have snapshot, no log, preIndex > myLastIndex
- //
- // condition2:
- // I have snapshot, have log, log <= snapshot, preIndex > myLastIndex
- //
- // condition3:
- // I have snapshot, preIndex < snapshot.lastApplyIndex
- //
- // condition4:
- // I have snapshot, preIndex == snapshot.lastApplyIndex, no data
- //
- // operation:
- // match snapshot.lastApplyIndex - 1;
- // no operation on log
- do {
- SyncIndex myLastIndex = syncNodeGetLastIndex(ths);
- SSnapshot snapshot;
- ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot);
-
- bool condition0 = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) &&
- syncNodeHasSnapshot(ths);
- bool condition1 =
- condition0 && (ths->pLogStore->syncLogEntryCount(ths->pLogStore) == 0) && (pMsg->prevLogIndex > myLastIndex); // donot use syncLogEntryCount!!! use isEmpty
- bool condition2 = condition0 && (ths->pLogStore->syncLogLastIndex(ths->pLogStore) <= snapshot.lastApplyIndex) &&
- (pMsg->prevLogIndex > myLastIndex);
- bool condition3 = condition0 && (pMsg->prevLogIndex < snapshot.lastApplyIndex);
- bool condition4 = condition0 && (pMsg->prevLogIndex == snapshot.lastApplyIndex) && (pMsg->dataLen == 0);
- bool condition = condition1 || condition2 || condition3 || condition4;
-
- if (condition) {
- char logBuf[128];
- snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, fake match, pre-index:%" PRId64 ", pre-term:%" PRIu64,
- pMsg->prevLogIndex, pMsg->prevLogTerm);
- syncNodeEventLog(ths, logBuf);
-
- // prepare response msg
- SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId);
- pReply->srcId = ths->myRaftId;
- pReply->destId = pMsg->srcId;
- pReply->term = ths->pRaftStore->currentTerm;
- pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
- pReply->success = true;
- pReply->matchIndex = snapshot.lastApplyIndex;
-
- // send response
- SRpcMsg rpcMsg;
- syncAppendEntriesReply2RpcMsg(pReply, &rpcMsg);
- syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg);
- syncAppendEntriesReplyDestroy(pReply);
-
- return ret;
- }
- } while (0);
-#endif
-
// fake match
//
// condition1:
diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c
index 81d050e179..bfaa785d0f 100644
--- a/source/libs/sync/src/syncAppendEntriesReply.c
+++ b/source/libs/sync/src/syncAppendEntriesReply.c
@@ -213,6 +213,11 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie
if (nextIndex > SYNC_INDEX_BEGIN) {
--nextIndex;
+ // speed up
+ if (nextIndex > pMsg->matchIndex + 1) {
+ nextIndex = pMsg->matchIndex + 1;
+ }
+
bool needStartSnapshot = false;
if (nextIndex >= SYNC_INDEX_BEGIN && !ths->pLogStore->syncLogExist(ths->pLogStore, nextIndex)) {
needStartSnapshot = true;
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 2c64728998..52cbcd0059 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -2222,13 +2222,18 @@ SyncTerm syncNodeGetPreTerm(SSyncNode* pSyncNode, SyncIndex index) {
SyncIndex preIndex = index - 1;
SSyncRaftEntry* pPreEntry = NULL;
int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, preIndex, &pPreEntry);
+
+ SSnapshot snapshot = {.data = NULL,
+ .lastApplyIndex = SYNC_INDEX_INVALID,
+ .lastApplyTerm = SYNC_TERM_INVALID,
+ .lastConfigIndex = SYNC_INDEX_INVALID};
+
if (code == 0) {
ASSERT(pPreEntry != NULL);
preTerm = pPreEntry->term;
taosMemoryFree(pPreEntry);
return preTerm;
} else {
- SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0, .lastConfigIndex = -1};
if (pSyncNode->pFsm->FpGetSnapshotInfo != NULL) {
pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
if (snapshot.lastApplyIndex == preIndex) {
@@ -2239,7 +2244,8 @@ SyncTerm syncNodeGetPreTerm(SSyncNode* pSyncNode, SyncIndex index) {
do {
char logBuf[128];
- snprintf(logBuf, sizeof(logBuf), "sync node get pre term error, index:%" PRId64, index);
+ snprintf(logBuf, sizeof(logBuf), "sync node get pre term error, index:%" PRId64 ", snap-index:%" PRId64 ", snap-term:%" PRIu64, index,
+ snapshot.lastApplyIndex, snapshot.lastApplyTerm);
syncNodeErrorLog(pSyncNode, logBuf);
} while (0);
diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c
index fa3b5d52d7..1a2a083677 100644
--- a/source/libs/sync/src/syncReplication.c
+++ b/source/libs/sync/src/syncReplication.c
@@ -132,7 +132,8 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
SyncIndex preLogIndex = syncNodeGetPreIndex(pSyncNode, nextIndex);
SyncTerm preLogTerm = syncNodeGetPreTerm(pSyncNode, nextIndex);
if (preLogTerm == SYNC_TERM_INVALID) {
- SyncIndex newNextIndex = syncNodeGetLastIndex(pSyncNode) + 1;
+ // SyncIndex newNextIndex = syncNodeGetLastIndex(pSyncNode) + 1;
+ SyncIndex newNextIndex = nextIndex + 1;
syncIndexMgrSetIndex(pSyncNode->pNextIndex, pDestId, newNextIndex);
syncIndexMgrSetIndex(pSyncNode->pMatchIndex, pDestId, SYNC_INDEX_INVALID);
sError("vgId:%d sync get pre term error, nextIndex:%" PRId64 ", update next-index:%" PRId64
@@ -222,7 +223,8 @@ int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) {
SyncIndex preLogIndex = syncNodeGetPreIndex(pSyncNode, nextIndex);
SyncTerm preLogTerm = syncNodeGetPreTerm(pSyncNode, nextIndex);
if (preLogTerm == SYNC_TERM_INVALID) {
- SyncIndex newNextIndex = syncNodeGetLastIndex(pSyncNode) + 1;
+ // SyncIndex newNextIndex = syncNodeGetLastIndex(pSyncNode) + 1;
+ SyncIndex newNextIndex = nextIndex + 1;
syncIndexMgrSetIndex(pSyncNode->pNextIndex, pDestId, newNextIndex);
syncIndexMgrSetIndex(pSyncNode->pMatchIndex, pDestId, SYNC_INDEX_INVALID);
sError("vgId:%d sync get pre term error, nextIndex:%" PRId64 ", update next-index:%" PRId64
diff --git a/source/libs/sync/test/sh/auto_bench.sh b/source/libs/sync/test/sh/auto_bench.sh
new file mode 100644
index 0000000000..32dc071018
--- /dev/null
+++ b/source/libs/sync/test/sh/auto_bench.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+if [ $# != 5 ] ; then
+ echo "Usage: $0 instances vgroups replica ctables rows"
+ echo ""
+ exit 1
+fi
+
+instances=$1
+vgroups=$2
+replica=$3
+ctables=$4
+rows=$5
+
+echo "params: instances:${instances}, vgroups:${vgroups}, replica:${replica}, ctables:${ctables}, rows:${rows}"
+
+dt=`date "+%Y-%m-%d-%H-%M-%S"`
+casedir=instances_${instances}_vgroups_${vgroups}_replica_${replica}_ctables_${ctables}_rows_${rows}_${dt}
+mkdir ${casedir}
+cp ./insert.tpl.json ${casedir}
+cd ${casedir}
+
+for i in `seq 1 ${instances}`;do
+ #echo ===$i===
+ cfg_file=bench_${i}.json
+ cp ./insert.tpl.json ${cfg_file}
+ rstfile=result_${i}
+ sed -i 's/tpl_vgroups_tpl/'${vgroups}'/g' ${cfg_file}
+ sed -i 's/tpl_replica_tpl/'${replica}'/g' ${cfg_file}
+ sed -i 's/tpl_ctables_tpl/'${ctables}'/g' ${cfg_file}
+ sed -i 's/tpl_stid_tpl/'${i}'/g' ${cfg_file}
+ sed -i 's/tpl_rows_tpl/'${rows}'/g' ${cfg_file}
+ sed -i 's/tpl_insert_result_tpl/'${rstfile}'/g' ${cfg_file}
+done
+
+for conf_file in `ls ./bench_*.json`;do
+ echo "nohup taosBenchmark -f ${conf_file} &"
+ nohup taosBenchmark -f ${conf_file} &
+done
+
+cd -
+
+exit 0
+
+
diff --git a/source/libs/sync/test/sh/insert.tpl.json b/source/libs/sync/test/sh/insert.tpl.json
new file mode 100644
index 0000000000..633dd70a24
--- /dev/null
+++ b/source/libs/sync/test/sh/insert.tpl.json
@@ -0,0 +1,77 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos/",
+ "host": "v3cluster-0001",
+ "port": 7100,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 8,
+ "thread_count_create_tbl": 8,
+ "result_file": "./tpl_insert_result_tpl",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 0,
+ "num_of_records_per_req": 100000,
+ "databases": [
+ {
+ "dbinfo": {
+ "name": "db1",
+ "drop": "yes",
+ "vgroups": tpl_vgroups_tpl,
+ "replica": tpl_replica_tpl
+ },
+ "super_tables": [
+ {
+ "name": "stb_tpl_stid_tpl",
+ "child_table_exists": "no",
+ "childtable_count": tpl_ctables_tpl,
+ "childtable_prefix": "stb_tpl_stid_tpl_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 50000,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": tpl_rows_tpl,
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 10000000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "sample_format": "csv",
+ "use_sample_ts": "no",
+ "tags_file": "",
+ "columns": [
+ {
+ "type": "INT"
+ },
+ {
+ "type": "DOUBLE",
+ "count": 1
+ },
+ {
+ "type": "BINARY",
+ "len": 40,
+ "count": 1
+ },
+ {
+ "type": "nchar",
+ "len": 20,
+ "count": 1
+ }
+ ],
+ "tags": [
+ {
+ "type": "TINYINT",
+ "count": 1
+ },
+ {
+ "type": "BINARY",
+ "len": 16,
+ "count": 1
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
diff --git a/source/os/CMakeLists.txt b/source/os/CMakeLists.txt
index f773e4ff58..2a9d0c8535 100644
--- a/source/os/CMakeLists.txt
+++ b/source/os/CMakeLists.txt
@@ -41,7 +41,7 @@ target_link_libraries(
)
if(TD_WINDOWS)
target_link_libraries(
- os PUBLIC ws2_32 iconv msvcregex wcwidth winmm
+ os PUBLIC ws2_32 iconv msvcregex wcwidth winmm crashdump
)
elseif(TD_DARWIN_64)
target_link_libraries(
diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c
index fa94bc6a13..3e68b6e086 100644
--- a/source/os/src/osSysinfo.c
+++ b/source/os/src/osSysinfo.c
@@ -91,6 +91,7 @@ LONG WINAPI FlCrashDump(PEXCEPTION_POINTERS ep) {
return EXCEPTION_CONTINUE_SEARCH;
}
+LONG WINAPI exceptionHandler(LPEXCEPTION_POINTERS exception);
#elif defined(_TD_DARWIN_64)
@@ -841,7 +842,8 @@ char *taosGetCmdlineByPID(int pid) {
void taosSetCoreDump(bool enable) {
#ifdef WINDOWS
- SetUnhandledExceptionFilter(&FlCrashDump);
+ // SetUnhandledExceptionFilter(exceptionHandler);
+ // SetUnhandledExceptionFilter(&FlCrashDump);
#elif defined(_TD_DARWIN_64)
#else
if (!enable) return;
diff --git a/source/os/src/osSystem.c b/source/os/src/osSystem.c
index ad7fa57182..c86cd19e32 100644
--- a/source/os/src/osSystem.c
+++ b/source/os/src/osSystem.c
@@ -18,6 +18,63 @@
#include "os.h"
#if defined(WINDOWS)
+typedef void (*MainWindows)(int argc,char** argv);
+MainWindows mainWindowsFunc = NULL;
+
+SERVICE_STATUS ServiceStatus;
+SERVICE_STATUS_HANDLE hServiceStatusHandle;
+void WINAPI windowsServiceCtrlHandle(DWORD request) {
+ switch (request) {
+ case SERVICE_CONTROL_STOP:
+ case SERVICE_CONTROL_SHUTDOWN:
+ raise(SIGINT);
+ ServiceStatus.dwCurrentState = SERVICE_STOP_PENDING;
+ if (!SetServiceStatus(hServiceStatusHandle, &ServiceStatus)) {
+ DWORD nError = GetLastError();
+ printf("failed to send stopped status to windows service: %d",nError);
+ }
+ break;
+ default:
+ return;
+ }
+}
+void WINAPI mainWindowsService(int argc,char** argv) {
+ int ret = 0;
+ ServiceStatus.dwServiceType = SERVICE_WIN32;
+ ServiceStatus.dwControlsAccepted = SERVICE_ACCEPT_PAUSE_CONTINUE | SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN;
+ ServiceStatus.dwCurrentState = SERVICE_START_PENDING;
+ ServiceStatus.dwWin32ExitCode = 0;
+ ServiceStatus.dwCheckPoint = 0;
+ ServiceStatus.dwWaitHint = 0;
+ ServiceStatus.dwServiceSpecificExitCode = 0;
+ hServiceStatusHandle = RegisterServiceCtrlHandler("taosd", &windowsServiceCtrlHandle);
+ if (hServiceStatusHandle == 0) {
+ DWORD nError = GetLastError();
+ printf("failed to register windows service ctrl handler: %d",nError);
+ }
+
+ ServiceStatus.dwCurrentState = SERVICE_RUNNING;
+ if (!SetServiceStatus(hServiceStatusHandle, &ServiceStatus)) {
+ DWORD nError = GetLastError();
+ printf("failed to send running status to windows service: %d",nError);
+ }
+ if (mainWindowsFunc != NULL) mainWindowsFunc(argc, argv);
+ ServiceStatus.dwCurrentState = SERVICE_STOPPED;
+ if (!SetServiceStatus(hServiceStatusHandle, &ServiceStatus)) {
+ DWORD nError = GetLastError();
+ printf("failed to send stopped status to windows service: %d",nError);
+ }
+}
+void stratWindowsService(MainWindows mainWindows) {
+ mainWindowsFunc = mainWindows;
+ SERVICE_TABLE_ENTRY ServiceTable[2];
+ ServiceTable[0].lpServiceName = "taosd";
+ ServiceTable[0].lpServiceProc = (LPSERVICE_MAIN_FUNCTION)mainWindowsService;
+ ServiceTable[1].lpServiceName = NULL;
+ ServiceTable[1].lpServiceProc = NULL;
+ StartServiceCtrlDispatcher(ServiceTable);
+}
+
#elif defined(_TD_DARWIN_64)
#else
#include
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index 0eb7737e8e..73d89523ce 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -82,7 +82,7 @@ int64_t tsNumOfTraceLogs = 0;
// log
int32_t dDebugFlag = 135;
int32_t vDebugFlag = 135;
-int32_t mDebugFlag = 131;
+int32_t mDebugFlag = 135;
int32_t cDebugFlag = 131;
int32_t jniDebugFlag = 131;
int32_t tmrDebugFlag = 131;
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 88b721d104..d745ecf706 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -89,13 +89,20 @@
./test.sh -f tsim/parser/alter_column.sim
./test.sh -f tsim/parser/alter_stable.sim
./test.sh -f tsim/parser/alter.sim
-# TD-17661 ./test.sh -f tsim/parser/alter1.sim
+# TD-17959 ./test.sh -f tsim/parser/alter1.sim
./test.sh -f tsim/parser/auto_create_tb_drop_tb.sim
./test.sh -f tsim/parser/auto_create_tb.sim
./test.sh -f tsim/parser/between_and.sim
./test.sh -f tsim/parser/binary_escapeCharacter.sim
-# TD-17738 ./test.sh -f tsim/parser/col_arithmetic_operation.sim
-# TD-17661 ./test.sh -f tsim/parser/columnValue.sim
+./test.sh -f tsim/parser/col_arithmetic_operation.sim
+./test.sh -f tsim/parser/columnValue_bigint.sim
+./test.sh -f tsim/parser/columnValue_bool.sim
+./test.sh -f tsim/parser/columnValue_double.sim
+./test.sh -f tsim/parser/columnValue_float.sim
+./test.sh -f tsim/parser/columnValue_int.sim
+./test.sh -f tsim/parser/columnValue_smallint.sim
+./test.sh -f tsim/parser/columnValue_tinyint.sim
+./test.sh -f tsim/parser/columnValue_unsign.sim
./test.sh -f tsim/parser/commit.sim
./test.sh -f tsim/parser/condition.sim
./test.sh -f tsim/parser/constCol.sim
@@ -145,7 +152,7 @@
./test.sh -f tsim/parser/select_across_vnodes.sim
./test.sh -f tsim/parser/select_distinct_tag.sim
./test.sh -f tsim/parser/select_from_cache_disk.sim
-# TD-17832 ./test.sh -f tsim/parser/select_with_tags.sim
+./test.sh -f tsim/parser/select_with_tags.sim
./test.sh -f tsim/parser/selectResNum.sim
./test.sh -f tsim/parser/set_tag_vals.sim
./test.sh -f tsim/parser/single_row_in_tb.sim
@@ -154,15 +161,15 @@
./test.sh -f tsim/parser/slimit.sim
./test.sh -f tsim/parser/slimit1.sim
./test.sh -f tsim/parser/stableOp.sim
-# TD-17661 ./test.sh -f tsim/parser/tags_dynamically_specifiy.sim
-# TD-17661 ./test.sh -f tsim/parser/tags_filter.sim
+./test.sh -f tsim/parser/tags_dynamically_specifiy.sim
+./test.sh -f tsim/parser/tags_filter.sim
./test.sh -f tsim/parser/tbnameIn.sim
./test.sh -f tsim/parser/timestamp.sim
./test.sh -f tsim/parser/top_groupby.sim
./test.sh -f tsim/parser/topbot.sim
./test.sh -f tsim/parser/union.sim
-# TD-17704 ./test.sh -f tsim/parser/union_sysinfo.sim
-# TD-17661 ./test.sh -f tsim/parser/where.sim
+./test.sh -f tsim/parser/union_sysinfo.sim
+./test.sh -f tsim/parser/where.sim
# ---- query ----
./test.sh -f tsim/query/charScalarFunction.sim
@@ -422,18 +429,18 @@
./test.sh -f tsim/tag/bool_binary.sim
./test.sh -f tsim/tag/bool_int.sim
./test.sh -f tsim/tag/bool.sim
-# TD-17661 ./test.sh -f tsim/tag/change.sim
+# TD-17407 ./test.sh -f tsim/tag/change.sim
./test.sh -f tsim/tag/column.sim
./test.sh -f tsim/tag/commit.sim
-# TD-17661 ./test.sh -f tsim/tag/create.sim
-# TD-17661 ./test.sh -f tsim/tag/delete.sim
-# TD-17661 ./test.sh -f tsim/tag/double.sim
-# TD-17661 ./test.sh -f tsim/tag/filter.sim
+# TD-17407 ./test.sh -f tsim/tag/create.sim
+# TD-17407 ./test.sh -f tsim/tag/delete.sim
+# TD-17407 ./test.sh -f tsim/tag/double.sim
+./test.sh -f tsim/tag/filter.sim
# TD-17407 ./test.sh -f tsim/tag/float.sim
./test.sh -f tsim/tag/int_binary.sim
./test.sh -f tsim/tag/int_float.sim
./test.sh -f tsim/tag/int.sim
-# TD-17661 ./test.sh -f tsim/tag/set.sim
+# TD-17959 ./test.sh -f tsim/tag/set.sim
./test.sh -f tsim/tag/smallint.sim
./test.sh -f tsim/tag/tinyint.sim
diff --git a/tests/script/tsim/alter/dnode.sim b/tests/script/tsim/alter/dnode.sim
index 35620f17aa..d773c1f8a9 100644
--- a/tests/script/tsim/alter/dnode.sim
+++ b/tests/script/tsim/alter/dnode.sim
@@ -24,14 +24,20 @@ sql alter dnode 1 'fsDebugFlag 131'
sql alter dnode 1 'udfDebugFlag 131'
sql alter dnode 1 'smaDebugFlag 131'
sql alter dnode 1 'idxDebugFlag 131'
+sql alter dnode 1 'tdbDebugFlag 131'
sql alter dnode 1 'tmrDebugFlag 131'
sql alter dnode 1 'uDebugFlag 131'
sql alter dnode 1 'smaDebugFlag 131'
sql alter dnode 1 'rpcDebugFlag 131'
sql alter dnode 1 'qDebugFlag 131'
+sql alter dnode 1 'metaDebugFlag 131'
sql_error alter dnode 2 'wDebugFlag 135'
sql_error alter dnode 2 'tmrDebugFlag 135'
+sql_error alter dnode 1 'monDebugFlag 131'
+sql_error alter dnode 1 'cqDebugFlag 131'
+sql_error alter dnode 1 'httpDebugFlag 131'
+sql_error alter dnode 1 'mqttDebugFlag 131'
print ======== step3
sql_error alter $hostname1 debugFlag 135
diff --git a/tests/script/tsim/compute/diff2.sim b/tests/script/tsim/compute/diff2.sim
index 021fcf6e8b..a09bee991e 100644
--- a/tests/script/tsim/compute/diff2.sim
+++ b/tests/script/tsim/compute/diff2.sim
@@ -79,7 +79,7 @@ sql select diff(c7) from $tb
sql_error select diff(c8) from $tb
sql_error select diff(c9) from $tb
sql_error select diff(ts) from $tb
-sql_error select diff(c1), diff(c2) from $tb
+sql select diff(c1), diff(c2) from $tb
sql select 2+diff(c1) from $tb
sql select diff(c1+2) from $tb
diff --git a/tests/script/tsim/insert/basic0.sim b/tests/script/tsim/insert/basic0.sim
index 1f3c93a4bf..7d91a77a83 100644
--- a/tests/script/tsim/insert/basic0.sim
+++ b/tests/script/tsim/insert/basic0.sim
@@ -54,7 +54,8 @@ print $data30 $data31 $data32 $data33
if $rows != 4 then
return -1
endi
-if $data01 != 10 then
+if $data01 != 10 then
+ print expect 10, actual: $data01
return -1
endi
if $data02 != 2.00000 then
diff --git a/tests/script/tsim/parser/alter1.sim b/tests/script/tsim/parser/alter1.sim
index b01e98a834..d917f4b61e 100644
--- a/tests/script/tsim/parser/alter1.sim
+++ b/tests/script/tsim/parser/alter1.sim
@@ -103,7 +103,7 @@ endi
print ================== change a tag value
sql alter table car1 set tag carid=10
-sql select carId, carmodel from car1
+sql select distinct carId, carmodel from car1
if $rows != 1 then
return -1
endi
diff --git a/tests/script/tsim/parser/col_arithmetic_operation.sim b/tests/script/tsim/parser/col_arithmetic_operation.sim
index add2945c66..f22beefdf8 100644
--- a/tests/script/tsim/parser/col_arithmetic_operation.sim
+++ b/tests/script/tsim/parser/col_arithmetic_operation.sim
@@ -131,20 +131,5 @@ sql_error select max(c1-c2) from $tb
#========================================regression test cases====================================
print =====================> td-1764
sql select sum(c1)/count(*), sum(c1) as b, count(*) as b from $stb interval(1y)
-if $rows != 1 then
- return -1
-endi
-
-if $data00 != @18-01-01 00:00:00.000@ then
- return -1
-endi
-
-if $data01 != 2.250000000 then
- return -1
-endi
-
-if $data02 != 225000 then
- return -1
-endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/parser/col_arithmetic_query.sim b/tests/script/tsim/parser/col_arithmetic_query.sim
index 10840b2296..b77dcbe498 100644
--- a/tests/script/tsim/parser/col_arithmetic_query.sim
+++ b/tests/script/tsim/parser/col_arithmetic_query.sim
@@ -511,24 +511,21 @@ if $rows != 1 then
endi
# slimit/soffset not support for normal table query. [d.11]===============================================================
-sql select sum(c1) from $stb slimit 1 soffset 19;
-if $rows != 0 then
- return -1
-endi
+sql_error select sum(c1) from $stb slimit 1 soffset 19;
-sql select sum(c1) from $stb interval(1s) group by tbname slimit 1 soffset 1
-sql select sum(c1) from ca_stb0 interval(1s) group by tbname slimit 2 soffset 4 limit 10 offset 1
+sql select sum(c1) from ca_stb0 partition by tbname interval(1s) slimit 1 soffset 1
+sql select sum(c1) from ca_stb0 partition by tbname interval(1s) slimit 2 soffset 4 limit 10 offset 1
# fill [d.12]===============================================================
-sql_error select first(c1)-last(c1), sum(c3)*count(c3), spread(c5 ) % count(*) from $stb interval(1s) fill(prev);
-sql_error select first(c1) from $stb fill(value, 20);
+sql_error select first(c1)-last(c1), sum(c3)*count(c3), spread(c5 ) % count(*) from ca_stb0 interval(1s) fill(prev);
+sql_error select first(c1) from ca_stb0 fill(value, 20);
# constant column. [d.13]===============================================================
# column value filter [d.14]===============================================================
# tag filter. [d.15]===============================================================
-sql select sum(c2)+99 from $stb where t1=12;
+sql select sum(c2)+99 from ca_stb0 where t1=12;
# multi-field output [d.16]===============================================================
sql select count(*), sum(c1)*avg(c2), avg(c3)*count(c3), sum(c3), sum(c4), first(c7), last(c8), first(c9), first(c7), last(c8) from $tb
@@ -548,15 +545,12 @@ if $data90 != 9.500000000 then
endi
# interval query [d.17]===============================================================
-sql select avg(c2)*count(c2), sum(c3)-first(c3), last(c4)+9 from $stb interval(1s)
+sql select avg(c2)*count(c2), sum(c3)-first(c3), last(c4)+9 from ca_stb0 interval(1s)
if $rows != 10000 then
return -1
endi
-if $data00 != @18-09-17 09:00:00.000@ then
- return -1
-endi
-sql_error select first(c7)- last(c1) from $tb interval(2y)
+sql select first(c7)- last(c1) from $tb interval(2y)
# aggregation query [d.18]===============================================================
# all cases in this part are aggregation query test.
diff --git a/tests/script/tsim/parser/columnValue.sim b/tests/script/tsim/parser/columnValue.sim
deleted file mode 100644
index 68336cdcc1..0000000000
--- a/tests/script/tsim/parser/columnValue.sim
+++ /dev/null
@@ -1,22 +0,0 @@
-system sh/stop_dnodes.sh
-system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-sql connect
-
-print ========== columnValues.sim
-
-sql drop database if exists db
-sql create database db
-sql use db
-
-run tsim/parser/columnValue_bool.sim
-run tsim/parser/columnValue_tinyint.sim
-run tsim/parser/columnValue_smallint.sim
-run tsim/parser/columnValue_int.sim
-run tsim/parser/columnValue_bigint.sim
-run tsim/parser/columnValue_float.sim
-run tsim/parser/columnValue_double.sim
-run tsim/parser/columnValue_unsign.sim
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-
diff --git a/tests/script/tsim/parser/columnValue_bigint.sim b/tests/script/tsim/parser/columnValue_bigint.sim
index 8841418ed3..ae97835dff 100644
--- a/tests/script/tsim/parser/columnValue_bigint.sim
+++ b/tests/script/tsim/parser/columnValue_bigint.sim
@@ -1,5 +1,12 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
sql connect
-sql create database if not exists db
+
+print ========== columnValues.sim
+
+sql drop database if exists db
+sql create database db
sql use db
#### test the value of all data types in four cases: static create table, insert column value, synamic create table, alter tag value
@@ -10,78 +17,64 @@ sql create table mt_bigint (ts timestamp, c bigint) tags (tagname bigint)
## case 00: static create table for test tag values
sql create table st_bigint_0 using mt_bigint tags (NULL)
-sql select tagname from st_bigint_0
-if $data00 != NULL then
+sql show tags from st_bigint_0
+if $data05 != NULL then
return -1
endi
sql create table st_bigint_1 using mt_bigint tags (NULL)
-sql select tagname from st_bigint_1
-if $data00 != NULL then
- return -1
-endi
-sql create table st_bigint_2 using mt_bigint tags ('NULL')
-sql select tagname from st_bigint_2
-if $data00 != NULL then
- return -1
-endi
-sql create table st_bigint_3 using mt_bigint tags ('NULL')
-sql select tagname from st_bigint_3
-if $data00 != NULL then
- return -1
-endi
-sql create table st_bigint_4 using mt_bigint tags ("NULL")
-sql select tagname from st_bigint_4
-if $data00 != NULL then
- return -1
-endi
-sql create table st_bigint_5 using mt_bigint tags ("NULL")
-sql select tagname from st_bigint_5
-if $data00 != NULL then
+sql show tags from st_bigint_1
+if $data05 != NULL then
return -1
endi
+
+sql_error create table st_bigint_2 using mt_bigint tags ('NULL')
+sql_error create table st_bigint_3 using mt_bigint tags ('NULL')
+sql_error create table st_bigint_4 using mt_bigint tags ("NULL")
+sql_error create table st_bigint_5 using mt_bigint tags ("NULL")
+
sql create table st_bigint_6 using mt_bigint tags (-9223372036854775807)
-sql select tagname from st_bigint_6
-if $data00 != -9223372036854775807 then
+sql show tags from st_bigint_6
+if $data05 != -9223372036854775807 then
return -1
endi
sql create table st_bigint_7 using mt_bigint tags (9223372036854775807)
-sql select tagname from st_bigint_7
-if $data00 != 9223372036854775807 then
+sql show tags from st_bigint_7
+if $data05 != 9223372036854775807 then
return -1
endi
sql create table st_bigint_8 using mt_bigint tags (37)
-sql select tagname from st_bigint_8
-if $data00 != 37 then
+sql show tags from st_bigint_8
+if $data05 != 37 then
return -1
endi
sql create table st_bigint_9 using mt_bigint tags (-100)
-sql select tagname from st_bigint_9
-if $data00 != -100 then
+sql show tags from st_bigint_9
+if $data05 != -100 then
return -1
endi
sql create table st_bigint_10 using mt_bigint tags (+113)
-sql select tagname from st_bigint_10
-if $data00 != 113 then
+sql show tags from st_bigint_10
+if $data05 != 113 then
return -1
endi
sql create table st_bigint_11 using mt_bigint tags ('-100')
-sql select tagname from st_bigint_11
-if $data00 != -100 then
+sql show tags from st_bigint_11
+if $data05 != -100 then
return -1
endi
sql create table st_bigint_12 using mt_bigint tags ("+78")
-sql select tagname from st_bigint_12
-if $data00 != 78 then
+sql show tags from st_bigint_12
+if $data05 != 78 then
return -1
endi
sql create table st_bigint_13 using mt_bigint tags (+0078)
-sql select tagname from st_bigint_13
-if $data00 != 78 then
+sql show tags from st_bigint_13
+if $data05 != 78 then
return -1
endi
sql create table st_bigint_14 using mt_bigint tags (-00078)
-sql select tagname from st_bigint_14
-if $data00 != -78 then
+sql show tags from st_bigint_14
+if $data05 != -78 then
return -1
endi
@@ -102,38 +95,7 @@ endi
if $data01 != NULL then
return -1
endi
-sql insert into st_bigint_2 values (now, 'NULL')
-sql select * from st_bigint_2
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_bigint_3 values (now, 'NULL')
-sql select * from st_bigint_3
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_bigint_4 values (now, "NULL")
-sql select * from st_bigint_4
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_bigint_5 values (now, "NULL")
-sql select * from st_bigint_5
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
+
sql insert into st_bigint_6 values (now, 9223372036854775807)
sql select * from st_bigint_6
if $rows != 1 then
@@ -211,8 +173,8 @@ endi
## case 02: dynamic create table for test tag values
sql insert into st_bigint_16 using mt_bigint tags (NULL) values (now, NULL)
-sql select tagname from st_bigint_16
-if $data00 != NULL then
+sql show tags from st_bigint_16
+if $data05 != NULL then
return -1
endi
sql select * from st_bigint_16
@@ -221,8 +183,8 @@ if $data01 != NULL then
endi
sql insert into st_bigint_17 using mt_bigint tags (NULL) values (now, NULL)
-sql select tagname from st_bigint_17
-if $data00 != NULL then
+sql show tags from st_bigint_17
+if $data05 != NULL then
return -1
endi
sql select * from st_bigint_17
@@ -230,8 +192,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_bigint_18 using mt_bigint tags ('NULL') values (now, 'NULL')
-sql select tagname from st_bigint_18
-if $data00 != NULL then
+sql show tags from st_bigint_18
+if $data05 != NULL then
return -1
endi
sql select * from st_bigint_18
@@ -239,8 +201,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_bigint_19 using mt_bigint tags ('NULL') values (now, 'NULL')
-sql select tagname from st_bigint_19
-if $data00 != NULL then
+sql show tags from st_bigint_19
+if $data05 != NULL then
return -1
endi
sql select * from st_bigint_19
@@ -248,8 +210,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_bigint_20 using mt_bigint tags ("NULL") values (now, "NULL")
-sql select tagname from st_bigint_20
-if $data00 != NULL then
+sql show tags from st_bigint_20
+if $data05 != NULL then
return -1
endi
sql select * from st_bigint_20
@@ -257,8 +219,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_bigint_21 using mt_bigint tags ("NULL") values (now, "NULL")
-sql select tagname from st_bigint_21
-if $data00 != NULL then
+sql show tags from st_bigint_21
+if $data05 != NULL then
return -1
endi
sql select * from st_bigint_21
@@ -266,8 +228,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_bigint_22 using mt_bigint tags (9223372036854775807) values (now, 9223372036854775807)
-sql select tagname from st_bigint_22
-if $data00 != 9223372036854775807 then
+sql show tags from st_bigint_22
+if $data05 != 9223372036854775807 then
return -1
endi
sql select * from st_bigint_22
@@ -275,8 +237,8 @@ if $data01 != 9223372036854775807 then
return -1
endi
sql insert into st_bigint_23 using mt_bigint tags (-9223372036854775807) values (now, -9223372036854775807)
-sql select tagname from st_bigint_23
-if $data00 != -9223372036854775807 then
+sql show tags from st_bigint_23
+if $data05 != -9223372036854775807 then
return -1
endi
sql select * from st_bigint_23
@@ -284,8 +246,8 @@ if $data01 != -9223372036854775807 then
return -1
endi
sql insert into st_bigint_24 using mt_bigint tags (10) values (now, 10)
-sql select tagname from st_bigint_24
-if $data00 != 10 then
+sql show tags from st_bigint_24
+if $data05 != 10 then
return -1
endi
sql select * from st_bigint_24
@@ -293,8 +255,8 @@ if $data01 != 10 then
return -1
endi
sql insert into st_bigint_25 using mt_bigint tags ("-0") values (now, "-0")
-sql select tagname from st_bigint_25
-if $data00 != 0 then
+sql show tags from st_bigint_25
+if $data05 != 0 then
return -1
endi
sql select * from st_bigint_25
@@ -302,8 +264,8 @@ if $data01 != 0 then
return -1
endi
sql insert into st_bigint_26 using mt_bigint tags ('123') values (now, '123')
-sql select tagname from st_bigint_26
-if $data00 != 123 then
+sql show tags from st_bigint_26
+if $data05 != 123 then
return -1
endi
sql select * from st_bigint_26
@@ -311,8 +273,8 @@ if $data01 != 123 then
return -1
endi
sql insert into st_bigint_27 using mt_bigint tags (+056) values (now, +00056)
-sql select tagname from st_bigint_27
-if $data00 != 56 then
+sql show tags from st_bigint_27
+if $data05 != 56 then
return -1
endi
sql select * from st_bigint_27
@@ -320,8 +282,8 @@ if $data01 != 56 then
return -1
endi
sql insert into st_bigint_28 using mt_bigint tags (-056) values (now, -0056)
-sql select tagname from st_bigint_28
-if $data00 != -56 then
+sql show tags from st_bigint_28
+if $data05 != -56 then
return -1
endi
sql select * from st_bigint_28
@@ -331,50 +293,50 @@ endi
### case 03: alter tag values
#sql alter table st_bigint_0 set tag tagname=9223372036854775807
-#sql select tagname from st_bigint_0
-#if $data00 != 9223372036854775807 then
+#sql show tags from st_bigint_0
+#if $data05 != 9223372036854775807 then
# return -1
#endi
#sql alter table st_bigint_0 set tag tagname=-9223372036854775807
-#sql select tagname from st_bigint_0
-#if $data00 != -9223372036854775807 then
+#sql show tags from st_bigint_0
+#if $data05 != -9223372036854775807 then
# return -1
#endi
#sql alter table st_bigint_0 set tag tagname=+100
-#sql select tagname from st_bigint_0
-#if $data00 != 100 then
+#sql show tags from st_bigint_0
+#if $data05 != 100 then
# return -1
#endi
#sql alter table st_bigint_0 set tag tagname=-33
-#sql select tagname from st_bigint_0
-#if $data00 != -33 then
+#sql show tags from st_bigint_0
+#if $data05 != -33 then
# return -1
#endi
#sql alter table st_bigint_0 set tag tagname='+98'
-#sql select tagname from st_bigint_0
-#if $data00 != 98 then
+#sql show tags from st_bigint_0
+#if $data05 != 98 then
# return -1
#endi
#sql alter table st_bigint_0 set tag tagname='-076'
-#sql select tagname from st_bigint_0
-#if $data00 != -76 then
+#sql show tags from st_bigint_0
+#if $data05 != -76 then
# return -1
#endi
#sql alter table st_bigint_0 set tag tagname=+0012
-#sql select tagname from st_bigint_0
-#if $data00 != 12 then
+#sql show tags from st_bigint_0
+#if $data05 != 12 then
# return -1
#endi
#sql alter table st_bigint_0 set tag tagname=-00063
-#sql select tagname from st_bigint_0
-#if $data00 != -63 then
+#sql show tags from st_bigint_0
+#if $data05 != -63 then
# return -1
#endi
## case 04: illegal input
################## when overflow, auto set max
sql_error create table st_bigint_e0 using mt_bigint tags (9223372036854775808)
-sql_error create table st_bigint_e0_1 using mt_bigint tags (-9223372036854775808)
+sql create table st_bigint_e0_1 using mt_bigint tags (-9223372036854775808)
sql_error create table st_bigint_e0_2 using mt_bigint tags (92233720368547758080)
sql_error create table st_bigint_e0_3 using mt_bigint tags (-9223372036854775809)
#sql_error create table st_bigint_e0 using mt_bigint tags (12.80) truncate integer part
@@ -384,7 +346,7 @@ sql_error create table st_bigint_e0 using mt_bigint tags ("123abc")
sql_error create table st_bigint_e0 using mt_bigint tags (abc)
sql_error create table st_bigint_e0 using mt_bigint tags ("abc")
sql_error create table st_bigint_e0 using mt_bigint tags (" ")
-sql_error create table st_bigint_e0 using mt_bigint tags ('')
+sql create table st_bigint_e0_error using mt_bigint tags ('')
sql create table st_bigint_e0 using mt_bigint tags (123)
sql create table st_bigint_e1 using mt_bigint tags (123)
@@ -401,9 +363,9 @@ sql create table st_bigint_e11 using mt_bigint tags (123)
sql create table st_bigint_e12 using mt_bigint tags (123)
sql_error insert into st_bigint_e0 values (now, 9223372036854775808)
-sql_error insert into st_bigint_e1 values (now, -9223372036854775808)
+sql insert into st_bigint_e1 values (now, -9223372036854775808)
sql_error insert into st_bigint_e2 values (now, 9223372036854775809)
-sql_error insert into st_bigint_e3 values (now, -9223372036854775808)
+sql insert into st_bigint_e3 values (now, -9223372036854775808)
#sql_error insert into st_bigint_e4 values (now, 922337203.6854775808)
#sql_error insert into st_bigint_e5 values (now, -922337203685477580.9)
sql_error insert into st_bigint_e6 values (now, 123abc)
@@ -411,10 +373,10 @@ sql_error insert into st_bigint_e7 values (now, "123abc")
sql_error insert into st_bigint_e9 values (now, abc)
sql_error insert into st_bigint_e10 values (now, "abc")
sql_error insert into st_bigint_e11 values (now, " ")
-sql_error insert into st_bigint_e12 values (now, '')
+sql insert into st_bigint_e12 values (now, '')
sql_error insert into st_bigint_e13 using mt_bigint tags (033) values (now, 9223372036854775808)
-sql_error insert into st_bigint_e14 using mt_bigint tags (033) values (now, -9223372036854775808)
+sql insert into st_bigint_e14 using mt_bigint tags (033) values (now, -9223372036854775808)
sql_error insert into st_bigint_e15 using mt_bigint tags (033) values (now, 9223372036854775818)
sql_error insert into st_bigint_e16 using mt_bigint tags (033) values (now, -9923372036854775808)
#sql_error insert into st_bigint_e17 using mt_bigint tags (033) values (now, 92233720368547758.08)
@@ -424,10 +386,10 @@ sql_error insert into st_bigint_e20 using mt_bigint tags (033) values (now, "123
sql_error insert into st_bigint_e22 using mt_bigint tags (033) values (now, abc)
sql_error insert into st_bigint_e23 using mt_bigint tags (033) values (now, "abc")
sql_error insert into st_bigint_e24 using mt_bigint tags (033) values (now, " ")
-sql_error insert into st_bigint_e25 using mt_bigint tags (033) values (now, '')
+sql insert into st_bigint_e25 using mt_bigint tags (033) values (now, '')
sql_error insert into st_bigint_e13_0 using mt_bigint tags (9223372036854775808) values (now, -033)
-sql_error insert into st_bigint_e14_0 using mt_bigint tags (-9223372036854775808) values (now, -033)
+sql insert into st_bigint_e14_0 using mt_bigint tags (-9223372036854775808) values (now, -033)
sql_error insert into st_bigint_e15_0 using mt_bigint tags (9223372036854775809) values (now, -033)
sql_error insert into st_bigint_e16_0 using mt_bigint tags (-9223372036854775898) values (now, -033)
#sql_error insert into st_bigint_e17 using mt_bigint tags (12.80) values (now, -033)
@@ -437,7 +399,7 @@ sql_error insert into st_bigint_e20 using mt_bigint tags ("123abc") values (now,
sql_error insert into st_bigint_e22 using mt_bigint tags (abc) values (now, -033)
sql_error insert into st_bigint_e23 using mt_bigint tags ("abc") values (now, -033)
sql_error insert into st_bigint_e24 using mt_bigint tags (" ") values (now, -033)
-sql_error insert into st_bigint_e25 using mt_bigint tags ('') values (now, -033)
+sql insert into st_bigint_e25 using mt_bigint tags ('') values (now, -033)
sql insert into st_bigint_e13 using mt_bigint tags (033) values (now, 00062)
sql insert into st_bigint_e14 using mt_bigint tags (033) values (now, 00062)
diff --git a/tests/script/tsim/parser/columnValue_bool.sim b/tests/script/tsim/parser/columnValue_bool.sim
index 3e8c408e13..d20c4efdc0 100644
--- a/tests/script/tsim/parser/columnValue_bool.sim
+++ b/tests/script/tsim/parser/columnValue_bool.sim
@@ -1,5 +1,12 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
sql connect
-sql create database if not exists db
+
+print ========== columnValues.sim
+
+sql drop database if exists db
+sql create database db
sql use db
#### test the value of all data types in four cases: static create table, insert column value, synamic create table, alter tag value
@@ -10,110 +17,110 @@ sql create table mt_bool (ts timestamp, c bool) tags (tagname bool)
## case 00: static create table for test tag values
sql create table st_bool_0 using mt_bool tags (NULL)
-sql select tagname from st_bool_0
-if $data00 != NULL then
- print ==1== expect: NULL, actually: $data00
+sql show tags from st_bool_0
+if $data05 != NULL then
+ print ==1== expect: NULL, actually: $data05
return -1
endi
sql create table st_bool_1 using mt_bool tags (NULL)
-sql select tagname from st_bool_1
-if $data00 != NULL then
- print ==2== expect: NULL, actually: $data00
+sql show tags from st_bool_1
+if $data05 != NULL then
+ print ==2== expect: NULL, actually: $data05
return -1
endi
sql create table st_bool_2 using mt_bool tags ('NULL')
-sql select tagname from st_bool_2
-if $data00 != NULL then
- print ==3== expect: NULL, actually: $data00
+sql show tags from st_bool_2
+if $data05 != false then
+ print ==3== expect: false, actually: $data05
return -1
endi
sql create table st_bool_3 using mt_bool tags ('NULL')
-sql select tagname from st_bool_3
-if $data00 != NULL then
- print ==4== expect: NULL, actually: $data00
+sql show tags from st_bool_3
+if $data05 != false then
+ print ==4== expect: false, actually: $data05
return -1
endi
sql create table st_bool_4 using mt_bool tags ("NULL")
-sql select tagname from st_bool_4
-if $data00 != NULL then
- print ==5== expect: NULL, actually: $data00
+sql show tags from st_bool_4
+if $data05 != false then
+ print ==5== expect: false, actually: $data05
return -1
endi
sql create table st_bool_5 using mt_bool tags ("NULL")
-sql select tagname from st_bool_5
-if $data00 != NULL then
- print ==6== expect: NULL, actually: $data00
+sql show tags from st_bool_5
+if $data05 != false then
+ print ==6== expect: false, actually: $data05
return -1
endi
sql create table st_bool_6 using mt_bool tags ("true")
-sql select tagname from st_bool_6
-if $data00 != 1 then
- print ==7== expect: 1, actually: $data00
+sql show tags from st_bool_6
+if $data05 != true then
+ print ==7== expect: 1, actually: $data05
return -1
endi
sql create table st_bool_7 using mt_bool tags ('true')
-sql select tagname from st_bool_7
-if $data00 != 1 then
- print ==8== expect: 1, actually: $data00
+sql show tags from st_bool_7
+if $data05 != true then
+ print ==8== expect: 1, actually: $data05
return -1
endi
sql create table st_bool_8 using mt_bool tags (true)
-sql select tagname from st_bool_8
-if $data00 != 1 then
- print ==9== expect: 1, actually: $data00
+sql show tags from st_bool_8
+if $data05 != true then
+ print ==9== expect: 1, actually: $data05
return -1
endi
sql create table st_bool_9 using mt_bool tags ("false")
-sql select tagname from st_bool_9
-if $data00 != 0 then
- print ==10== expect: 0, actually: $data00
+sql show tags from st_bool_9
+if $data05 != false then
+ print ==10== expect: 0, actually: $data05
return -1
endi
sql create table st_bool_10 using mt_bool tags ('false')
-sql select tagname from st_bool_10
-if $data00 != 0 then
- print ==11== expect: 0, actually: $data00
+sql show tags from st_bool_10
+if $data05 != false then
+ print ==11== expect: 0, actually: $data05
return -1
endi
sql create table st_bool_11 using mt_bool tags (false)
-sql select tagname from st_bool_11
-if $data00 != 0 then
- print ==12== expect: 0, actually: $data00
+sql show tags from st_bool_11
+if $data05 != false then
+ print ==12== expect: 0, actually: $data05
return -1
endi
sql create table st_bool_12 using mt_bool tags (0)
-sql select tagname from st_bool_12
-if $data00 != 0 then
- print ==13== expect: 0, actually: $data00
+sql show tags from st_bool_12
+if $data05 != false then
+ print ==13== expect: 0, actually: $data05
return -1
endi
sql create table st_bool_13 using mt_bool tags (1)
-sql select tagname from st_bool_13
-if $data00 != 1 then
- print ==14== expect: 1, actually: $data00
+sql show tags from st_bool_13
+if $data05 != true then
+ print ==14== expect: 1, actually: $data05
return -1
endi
sql create table st_bool_14 using mt_bool tags (6.9)
-sql select tagname from st_bool_14
-if $data00 != 1 then
- print ==15== expect: 1, actually: $data00
+sql show tags from st_bool_14
+if $data05 != true then
+ print ==15== expect: 1, actually: $data05
return -1
endi
sql create table st_bool_15 using mt_bool tags (-3)
-sql select tagname from st_bool_15
-if $data00 != 1 then
+sql show tags from st_bool_15
+if $data05 != true then
print ==16== expect: 1, actually: $data00
return -1
endi
sql create table st_bool_15_0 using mt_bool tags (+300)
-sql select tagname from st_bool_15_0
-if $data00 != 1 then
+sql show tags from st_bool_15_0
+if $data05 != true then
print ==16== expect: 1, actually: $data00
return -1
endi
sql create table st_bool_15_1 using mt_bool tags (-8.03)
-sql select tagname from st_bool_15_1
-if $data00 != 1 then
+sql show tags from st_bool_15_1
+if $data05 != true then
print ==16== expect: 1, actually: $data00
return -1
endi
@@ -284,8 +291,8 @@ endi
## case 02: dynamic create table for test tag values
sql insert into st_bool_16 using mt_bool tags (NULL) values (now, NULL)
-sql select tagname from st_bool_16
-if $data00 != NULL then
+sql show tags from st_bool_16
+if $data05 != NULL then
print ==33== expect: NULL, actually: $data00
return -1
endi
@@ -296,8 +303,8 @@ if $data01 != NULL then
endi
sql insert into st_bool_17 using mt_bool tags (NULL) values (now, NULL)
-sql select tagname from st_bool_17
-if $data00 != NULL then
+sql show tags from st_bool_17
+if $data05 != NULL then
print ==35== expect: NULL, actually: $data00
return -1
endi
@@ -307,8 +314,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_bool_18 using mt_bool tags ('NULL') values (now, 'NULL')
-sql select tagname from st_bool_18
-if $data00 != NULL then
+sql show tags from st_bool_18
+if $data05 != NULL then
print ==37== expect: NULL, actually: $data00
return -1
endi
@@ -318,8 +325,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_bool_19 using mt_bool tags ('NULL') values (now, 'NULL')
-sql select tagname from st_bool_19
-if $data00 != NULL then
+sql show tags from st_bool_19
+if $data05 != NULL then
print ==39== expect: NULL, actually: $data00
return -1
endi
@@ -329,8 +336,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_bool_20 using mt_bool tags ("NULL") values (now, "NULL")
-sql select tagname from st_bool_20
-if $data00 != NULL then
+sql show tags from st_bool_20
+if $data05 != NULL then
print ==41== expect: NULL, actually: $data00
return -1
endi
@@ -340,8 +347,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_bool_21 using mt_bool tags ("NULL") values (now, "NULL")
-sql select tagname from st_bool_21
-if $data00 != NULL then
+sql show tags from st_bool_21
+if $data05 != NULL then
print ==43== expect: NULL, actually: $data00
return -1
endi
@@ -351,8 +358,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_bool_22 using mt_bool tags ("true") values (now, "true")
-sql select tagname from st_bool_22
-if $data00 != 1 then
+sql show tags from st_bool_22
+if $data05 != true then
print ==45== expect: 1, actually: $data00
return -1
endi
@@ -362,8 +369,8 @@ if $data01 != 1 then
return -1
endi
sql insert into st_bool_23 using mt_bool tags ('true') values (now, 'true')
-sql select tagname from st_bool_23
-if $data00 != 1 then
+sql show tags from st_bool_23
+if $data05 != true then
print ==47== expect: 1, actually: $data00
return -1
endi
@@ -373,8 +380,8 @@ if $data01 != 1 then
return -1
endi
sql insert into st_bool_24 using mt_bool tags (true) values (now, true)
-sql select tagname from st_bool_24
-if $data00 != 1 then
+sql show tags from st_bool_24
+if $data05 != true then
print ==49== expect: 1, actually: $data00
return -1
endi
@@ -384,8 +391,8 @@ if $data01 != 1 then
return -1
endi
sql insert into st_bool_25 using mt_bool tags ("false") values (now, "false")
-sql select tagname from st_bool_25
-if $data00 != 0 then
+sql show tags from st_bool_25
+if $data05 != false then
print ==51== expect: 0, actually: $data00
return -1
endi
@@ -395,8 +402,8 @@ if $data01 != 0 then
return -1
endi
sql insert into st_bool_26 using mt_bool tags ('false') values (now, 'false')
-sql select tagname from st_bool_26
-if $data00 != 0 then
+sql show tags from st_bool_26
+if $data05 != false then
print ==53== expect: 0, actually: $data00
return -1
endi
@@ -406,8 +413,8 @@ if $data01 != 0 then
return -1
endi
sql insert into st_bool_27 using mt_bool tags (false) values (now, false)
-sql select tagname from st_bool_27
-if $data00 != 0 then
+sql show tags from st_bool_27
+if $data05 != false then
print ==55== expect: 0, actually: $data00
return -1
endi
@@ -417,8 +424,8 @@ if $data01 != 0 then
return -1
endi
sql insert into st_bool_28 using mt_bool tags (0) values (now, 0)
-sql select tagname from st_bool_28
-if $data00 != 0 then
+sql show tags from st_bool_28
+if $data05 != false then
print ==57== expect: 0, actually: $data00
return -1
endi
@@ -428,8 +435,8 @@ if $data01 != 0 then
return -1
endi
sql insert into st_bool_29 using mt_bool tags (1) values (now, 1)
-sql select tagname from st_bool_29
-if $data00 != 1 then
+sql show tags from st_bool_29
+if $data05 != true then
print ==59== expect: 1, actually: $data00
return -1
endi
@@ -439,8 +446,8 @@ if $data01 != 1 then
return -1
endi
sql insert into st_bool_30 using mt_bool tags (6.9) values (now, 6.9)
-sql select tagname from st_bool_30
-if $data00 != 1 then
+sql show tags from st_bool_30
+if $data05 != true then
print ==61== expect: 1, actually: $data00
return -1
endi
@@ -450,8 +457,8 @@ if $data01 != 1 then
return -1
endi
sql insert into st_bool_31 using mt_bool tags (-3) values (now, -3)
-sql select tagname from st_bool_31
-if $data00 != 1 then
+sql show tags from st_bool_31
+if $data05 != true then
print ==63== expect: 1, actually: $data00
return -1
endi
@@ -461,8 +468,8 @@ if $data01 != 1 then
return -1
endi
sql insert into st_bool_32 using mt_bool tags (+300) values (now, +300)
-sql select tagname from st_bool_32
-if $data00 != 1 then
+sql show tags from st_bool_32
+if $data05 != true then
print ==63== expect: 1, actually: $data00
return -1
endi
@@ -472,8 +479,8 @@ if $data01 != 1 then
return -1
endi
sql insert into st_bool_33 using mt_bool tags (+30.890) values (now, +30.890)
-sql select tagname from st_bool_33
-if $data00 != 1 then
+sql show tags from st_bool_33
+if $data05 != true then
print ==63== expect: 1, actually: $data00
return -1
endi
@@ -490,140 +497,140 @@ endi
## case 03: alter tag values
#sql alter table st_bool_0 set tag tagname=true
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != true then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname=NULL
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != NULL then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname=false
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != false then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname=NULL
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != NULL then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname='true'
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != true then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname='NULL'
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != NULL then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname='false'
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != false then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname='NULL'
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != NULL then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname="true"
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != true then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname="NULL"
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != NULL then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname="false"
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != false then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname="NULL"
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != NULL then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname=1
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != true then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname=0
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != false then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname=6.9
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != true then
# return -1
#endi
#sql alter table st_bool_0 set tag tagname=-3
-#sql select tagname from st_bool_0
+#sql show tags from st_bool_0
#if $data00 != true then
# return -1
#endi
# case 04: illegal input
sql_error create table st_bool_e0 using mt_bool tags (123abc)
-sql_error create table st_bool_e1 using mt_bool tags ("123abc")
-sql_error create table st_bool_e2 using mt_bool tags ("123")
+sql create table st_bool_e1 using mt_bool tags ("123abc")
+sql create table st_bool_e2 using mt_bool tags ("123")
sql_error create table st_bool_e3 using mt_bool tags (abc)
-sql_error create table st_bool_e4 using mt_bool tags ("abc")
-sql_error create table st_bool_e5 using mt_bool tags (" ")
-sql_error create table st_bool_e6 using mt_bool tags ('')
+sql create table st_bool_e4 using mt_bool tags ("abc")
+sql create table st_bool_e5 using mt_bool tags (" ")
+sql create table st_bool_e6 using mt_bool tags ('')
-sql create table st_bool_e0 using mt_bool tags (true)
-sql create table st_bool_e1 using mt_bool tags (true)
-sql create table st_bool_e2 using mt_bool tags (true)
-sql create table st_bool_e3 using mt_bool tags (true)
-sql create table st_bool_e4 using mt_bool tags (true)
-sql create table st_bool_e5 using mt_bool tags (true)
-sql create table st_bool_e6 using mt_bool tags (true)
+sql create table st_bool_f0 using mt_bool tags (true)
+sql create table st_bool_f1 using mt_bool tags (true)
+sql create table st_bool_f2 using mt_bool tags (true)
+sql create table st_bool_f3 using mt_bool tags (true)
+sql create table st_bool_f4 using mt_bool tags (true)
+sql create table st_bool_f5 using mt_bool tags (true)
+sql create table st_bool_f6 using mt_bool tags (true)
-sql_error insert into st_bool_e0 values (now, 123abc)
-sql_error insert into st_bool_e1 values (now, "123abc")
-sql_error insert into st_bool_e2 values (now, "123")
-sql_error insert into st_bool_e3 values (now, abc)
-sql_error insert into st_bool_e4 values (now, "abc")
-sql_error insert into st_bool_e5 values (now, " ")
-sql_error insert into st_bool_e6 values (now, '')
+sql_error insert into st_bool_g0 values (now, 123abc)
+sql_error insert into st_bool_g1 values (now, "123abc")
+sql_error insert into st_bool_g2 values (now, "123")
+sql_error insert into st_bool_g3 values (now, abc)
+sql_error insert into st_bool_g4 values (now, "abc")
+sql_error insert into st_bool_g5 values (now, " ")
+sql_error insert into st_bool_g6 values (now, '')
-sql_error insert into st_bool_e10 using mt_bool tags (123abc) values (now, 1)
-sql_error insert into st_bool_e11 using mt_bool tags ("123abc") values (now, 1)
-sql_error insert into st_bool_e12 using mt_bool tags ("123") values (now, 1)
-sql_error insert into st_bool_e13 using mt_bool tags (abc) values (now, 1)
-sql_error insert into st_bool_e14 using mt_bool tags ("abc") values (now, 1)
-sql_error insert into st_bool_e15 using mt_bool tags (" ") values (now, 1)
-sql_error insert into st_bool_e16 using mt_bool tags ('') values (now, 1)
+sql_error insert into st_bool_h0 using mt_bool tags (123abc) values (now, 1)
+sql_error insert into st_bool_h1 using mt_bool tags ("123abc") values (now, 1)
+sql_error insert into st_bool_h2 using mt_bool tags ("123") values (now, 1)
+sql_error insert into st_bool_h3 using mt_bool tags (abc) values (now, 1)
+sql_error insert into st_bool_h4 using mt_bool tags ("abc") values (now, 1)
+sql_error insert into st_bool_h5 using mt_bool tags (" ") values (now, 1)
+sql_error insert into st_bool_h6 using mt_bool tags ('') values (now, 1)
-sql_error insert into st_bool_e17 using mt_bool tags (1) values (now, 123abc)
-sql_error insert into st_bool_e18 using mt_bool tags (1) values (now, "123abc")
-sql_error insert into st_bool_e19 using mt_bool tags (1) values (now, "123")
-sql_error insert into st_bool_e20 using mt_bool tags (1) values (now, abc)
-sql_error insert into st_bool_e21 using mt_bool tags (1) values (now, "abc")
-sql_error insert into st_bool_e22 using mt_bool tags (1) values (now, " ")
-sql_error insert into st_bool_e23 using mt_bool tags (1) values (now, '')
+sql_error insert into st_bool_h0 using mt_bool tags (1) values (now, 123abc)
+sql_error insert into st_bool_h1 using mt_bool tags (1) values (now, "123abc")
+sql_error insert into st_bool_h2 using mt_bool tags (1) values (now, "123")
+sql_error insert into st_bool_h3 using mt_bool tags (1) values (now, abc)
+sql_error insert into st_bool_h4 using mt_bool tags (1) values (now, "abc")
+sql_error insert into st_bool_h5 using mt_bool tags (1) values (now, " ")
+sql_error insert into st_bool_h6 using mt_bool tags (1) values (now, '')
-sql insert into st_bool_e10 using mt_bool tags (1) values (now, 1)
-sql insert into st_bool_e11 using mt_bool tags (1) values (now, 1)
-sql insert into st_bool_e12 using mt_bool tags (1) values (now, 1)
-sql insert into st_bool_e13 using mt_bool tags (1) values (now, 1)
-sql insert into st_bool_e14 using mt_bool tags (1) values (now, 1)
-sql insert into st_bool_e15 using mt_bool tags (1) values (now, 1)
-sql insert into st_bool_e16 using mt_bool tags (1) values (now, 1)
+sql insert into st_bool_i0 using mt_bool tags (1) values (now, 1)
+sql insert into st_bool_i1 using mt_bool tags (1) values (now, 1)
+sql insert into st_bool_i2 using mt_bool tags (1) values (now, 1)
+sql insert into st_bool_i3 using mt_bool tags (1) values (now, 1)
+sql insert into st_bool_i4 using mt_bool tags (1) values (now, 1)
+sql insert into st_bool_i5 using mt_bool tags (1) values (now, 1)
+sql insert into st_bool_i6 using mt_bool tags (1) values (now, 1)
-sql_error alter table st_bool_e10 set tag tagname=123abc
-sql_error alter table st_bool_e11 set tag tagname="123abc"
-sql_error alter table st_bool_e12 set tag tagname="123"
-sql_error alter table st_bool_e13 set tag tagname=abc
-sql_error alter table st_bool_e14 set tag tagname="abc"
-sql_error alter table st_bool_e15 set tag tagname=" "
-sql_error alter table st_bool_e16 set tag tagname=''
+sql_error alter table st_bool_i0 set tag tagname=123abc
+sql alter table st_bool_i1 set tag tagname="123abc"
+sql alter table st_bool_i2 set tag tagname="123"
+sql_error alter table st_bool_i3 set tag tagname=abc
+sql alter table st_bool_i4 set tag tagname="abc"
+sql alter table st_bool_i5 set tag tagname=" "
+sql alter table st_bool_i6 set tag tagname=''
diff --git a/tests/script/tsim/parser/columnValue_double.sim b/tests/script/tsim/parser/columnValue_double.sim
index c7ba7b0048..dae64735ea 100644
--- a/tests/script/tsim/parser/columnValue_double.sim
+++ b/tests/script/tsim/parser/columnValue_double.sim
@@ -1,5 +1,12 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
sql connect
-sql create database if not exists db
+
+print ========== columnValues.sim
+
+sql drop database if exists db
+sql create database db
sql use db
#### test the value of all data types in four cases: static create table, insert column value, synamic create table, alter tag value
@@ -10,135 +17,135 @@ sql create table mt_double (ts timestamp, c double) tags (tagname double)
## case 00: static create table for test tag values
sql create table st_double_0 using mt_double tags (NULL )
-sql select tagname from st_double_0
-if $data00 != NULL then
+sql show tags from st_double_0
+if $data05 != NULL then
return -1
endi
sql create table st_double_1 using mt_double tags (NULL)
-sql select tagname from st_double_1
-if $data00 != NULL then
+sql show tags from st_double_1
+if $data05 != NULL then
return -1
endi
sql create table st_double_2 using mt_double tags ('NULL')
-sql select tagname from st_double_2
-if $data00 != NULL then
+sql show tags from st_double_2
+if $data05 != 0.000000000 then
return -1
endi
sql create table st_double_3 using mt_double tags ('NULL')
-sql select tagname from st_double_3
-if $data00 != NULL then
+sql show tags from st_double_3
+if $data05 != 0.000000000 then
return -1
endi
sql create table st_double_4 using mt_double tags ("NULL")
-sql select tagname from st_double_4
-if $data00 != NULL then
+sql show tags from st_double_4
+if $data05 != 0.000000000 then
return -1
endi
sql create table st_double_5 using mt_double tags ("NULL")
-sql select tagname from st_double_5
-if $data00 != NULL then
+sql show tags from st_double_5
+if $data05 != 0.000000000 then
return -1
endi
sql create table st_double_6 using mt_double tags (-123.321)
-sql select tagname from st_double_6
-if $data00 != -123.321000000 then
- print expect -123.321000000, actual: $data00
+sql show tags from st_double_6
+if $data05 != -123.321000000 then
+ print expect -123.321000000, actual: $data05
return -1
endi
sql create table st_double_7 using mt_double tags (+1.567)
-sql select tagname from st_double_7
-if $data00 != 1.567000000 then
+sql show tags from st_double_7
+if $data05 != 1.567000000 then
return -1
endi
sql create table st_double_8 using mt_double tags (379.001)
-sql select tagname from st_double_8
-if $data00 != 379.001000000 then
+sql show tags from st_double_8
+if $data05 != 379.001000000 then
return -1
endi
sql create table st_double_9 using mt_double tags (1.5e+3)
-sql select tagname from st_double_9
-if $data00 != 1500.000000000 then
+sql show tags from st_double_9
+if $data05 != 1500.000000000 then
return -1
endi
sql create table st_double_10 using mt_double tags (-1.5e-3)
-sql select tagname from st_double_10
-if $data00 != -0.001500000 then
+sql show tags from st_double_10
+if $data05 != -0.001500000 then
return -1
endi
sql create table st_double_11 using mt_double tags (+1.5e+3)
-sql select tagname from st_double_11
-if $data00 != 1500.000000000 then
+sql show tags from st_double_11
+if $data05 != 1500.000000000 then
return -1
endi
sql create table st_double_12 using mt_double tags (-1.5e+3)
-sql select tagname from st_double_12
-if $data00 != -1500.000000000 then
+sql show tags from st_double_12
+if $data05 != -1500.000000000 then
return -1
endi
sql create table st_double_13 using mt_double tags (1.5e-3)
-sql select tagname from st_double_13
-if $data00 != 0.001500000 then
+sql show tags from st_double_13
+if $data05 != 0.001500000 then
return -1
endi
sql create table st_double_14 using mt_double tags (1.5E-3)
-sql select tagname from st_double_14
-if $data00 != 0.001500000 then
+sql show tags from st_double_14
+if $data05 != 0.001500000 then
return -1
endi
sql create table st_double_6_0 using mt_double tags ('-123.321')
-sql select tagname from st_double_6_0
-if $data00 != -123.321000000 then
+sql show tags from st_double_6_0
+if $data05 != -123.321000000 then
return -1
endi
sql create table st_double_7_0 using mt_double tags ('+1.567')
-sql select tagname from st_double_7_0
-if $data00 != 1.567000000 then
+sql show tags from st_double_7_0
+if $data05 != 1.567000000 then
return -1
endi
sql create table st_double_8_0 using mt_double tags ('379.001')
-sql select tagname from st_double_8_0
-if $data00 != 379.001000000 then
+sql show tags from st_double_8_0
+if $data05 != 379.001000000 then
return -1
endi
sql create table st_double_9_0 using mt_double tags ('1.5e+3')
-sql select tagname from st_double_9_0
-if $data00 != 1500.000000000 then
+sql show tags from st_double_9_0
+if $data05 != 1500.000000000 then
return -1
endi
sql create table st_double_10_0 using mt_double tags ('-1.5e-3')
-sql select tagname from st_double_10_0
-if $data00 != -0.001500000 then
+sql show tags from st_double_10_0
+if $data05 != -0.001500000 then
return -1
endi
sql create table st_double_11_0 using mt_double tags ('+1.5e+3')
-sql select tagname from st_double_11_0
-if $data00 != 1500.000000000 then
+sql show tags from st_double_11_0
+if $data05 != 1500.000000000 then
return -1
endi
sql create table st_double_12_0 using mt_double tags ('-1.5e+3')
-sql select tagname from st_double_12_0
-if $data00 != -1500.000000000 then
+sql show tags from st_double_12_0
+if $data05 != -1500.000000000 then
return -1
endi
sql create table st_double_13_0 using mt_double tags ('1.5e-3')
-sql select tagname from st_double_13_0
-if $data00 != 0.001500000 then
+sql show tags from st_double_13_0
+if $data05 != 0.001500000 then
return -1
endi
sql create table st_double_14_0 using mt_double tags ('1.5E-3')
-sql select tagname from st_double_14_0
-if $data00 != 0.001500000 then
+sql show tags from st_double_14_0
+if $data05 != 0.001500000 then
return -1
endi
sql create table st_double_15_0 using mt_double tags (1.7976931348623157e+308)
-sql select tagname from st_double_15_0
-#if $data00 != 0.001500000 then
+sql show tags from st_double_15_0
+#if $data05 != 0.001500000 then
# return -1
#endi
sql create table st_double_16_0 using mt_double tags (-1.7976931348623157e+308)
-sql select tagname from st_double_16_0
-#if $data00 != 0.001500000 then
+sql show tags from st_double_16_0
+#if $data05 != 0.001500000 then
# return -1
#endi
@@ -270,8 +277,8 @@ endi
## case 02: dynamic create table for test tag values
sql insert into st_double_16 using mt_double tags (NULL ) values (now, NULL )
-sql select tagname from st_double_16
-if $data00 != NULL then
+sql show tags from st_double_16
+if $data05 != NULL then
return -1
endi
sql select * from st_double_16
@@ -280,8 +287,8 @@ if $data01 != NULL then
endi
sql insert into st_double_17 using mt_double tags (NULL) values (now, NULL)
-sql select tagname from st_double_17
-if $data00 != NULL then
+sql show tags from st_double_17
+if $data05 != NULL then
return -1
endi
sql select * from st_double_17
@@ -289,8 +296,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_double_18 using mt_double tags ('NULL') values (now, 'NULL')
-sql select tagname from st_double_18
-if $data00 != NULL then
+sql show tags from st_double_18
+if $data05 != NULL then
return -1
endi
sql select * from st_double_18
@@ -298,8 +305,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_double_19 using mt_double tags ('NULL') values (now, 'NULL')
-sql select tagname from st_double_19
-if $data00 != NULL then
+sql show tags from st_double_19
+if $data05 != NULL then
return -1
endi
sql select * from st_double_19
@@ -307,8 +314,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_double_20 using mt_double tags ("NULL") values (now, "NULL")
-sql select tagname from st_double_20
-if $data00 != NULL then
+sql show tags from st_double_20
+if $data05 != NULL then
return -1
endi
sql select * from st_double_20
@@ -316,8 +323,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_double_21 using mt_double tags ("NULL") values (now, "NULL")
-sql select tagname from st_double_21
-if $data00 != NULL then
+sql show tags from st_double_21
+if $data05 != NULL then
return -1
endi
sql select * from st_double_21
@@ -325,8 +332,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_double_22 using mt_double tags (127) values (now, 1.7976931348623157e+308)
-sql select tagname from st_double_22
-#if $data00 != 127 then
+sql show tags from st_double_22
+#if $data05 != 127 then
# return -1
#endi
sql select * from st_double_22
@@ -334,8 +341,8 @@ sql select * from st_double_22
# return -1
#endi
sql insert into st_double_23 using mt_double tags (-127) values (now, -1.7976931348623157e+308)
-sql select tagname from st_double_23
-#if $data00 != -127 then
+sql show tags from st_double_23
+#if $data05 != -127 then
# return -1
#endi
sql select * from st_double_23
@@ -343,8 +350,8 @@ sql select * from st_double_23
# return -1
#endi
sql insert into st_double_24 using mt_double tags (10) values (now, 10)
-sql select tagname from st_double_24
-#if $data00 != 10 then
+sql show tags from st_double_24
+#if $data05 != 10 then
# return -1
#endi
sql select * from st_double_24
@@ -352,8 +359,8 @@ sql select * from st_double_24
# return -1
#endi
sql insert into st_double_25 using mt_double tags ("-0") values (now, "-0")
-sql select tagname from st_double_25
-#if $data00 != 0 then
+sql show tags from st_double_25
+#if $data05 != 0 then
# return -1
#endi
sql select * from st_double_25
@@ -361,8 +368,8 @@ sql select * from st_double_25
# return -1
#endi
sql insert into st_double_26 using mt_double tags ('123') values (now, '12.3')
-sql select tagname from st_double_26
-#if $data00 != 123 then
+sql show tags from st_double_26
+#if $data05 != 123 then
# return -1
#endi
sql select * from st_double_26
@@ -370,8 +377,8 @@ sql select * from st_double_26
# return -1
#endi
sql insert into st_double_27 using mt_double tags (+056) values (now, +0005.6)
-sql select tagname from st_double_27
-#if $data00 != 56 then
+sql show tags from st_double_27
+#if $data05 != 56 then
# return -1
#endi
sql select * from st_double_27
@@ -379,8 +386,8 @@ sql select * from st_double_27
# return -1
#endi
sql insert into st_double_28 using mt_double tags (-056) values (now, -005.6)
-sql select tagname from st_double_28
-#if $data00 != -56 then
+sql show tags from st_double_28
+#if $data05 != -56 then
# return -1
#endi
sql select * from st_double_28
@@ -390,43 +397,43 @@ sql select * from st_double_28
### case 03: alter tag values
#sql alter table st_double_0 set tag tagname=1.7976931348623157e+308
-#sql select tagname from st_double_0
-##if $data00 != 127 then
+#sql show tags from st_double_0
+##if $data05 != 127 then
## return -1
##endi
#sql alter table st_double_0 set tag tagname=-1.7976931348623157e+308
-#sql select tagname from st_double_0
-##if $data00 != -127 then
+#sql show tags from st_double_0
+##if $data05 != -127 then
## return -1
##endi
#sql alter table st_double_0 set tag tagname=+10.340
-#sql select tagname from st_double_0
-##if $data00 != 100 then
+#sql show tags from st_double_0
+##if $data05 != 100 then
## return -1
##endi
#sql alter table st_double_0 set tag tagname=-33.87
-#sql select tagname from st_double_0
-##if $data00 != -33 then
+#sql show tags from st_double_0
+##if $data05 != -33 then
## return -1
##endi
#sql alter table st_double_0 set tag tagname='+9.8'
-#sql select tagname from st_double_0
-##if $data00 != 98 then
+#sql show tags from st_double_0
+##if $data05 != 98 then
## return -1
##endi
#sql alter table st_double_0 set tag tagname='-07.6'
-#sql select tagname from st_double_0
-##if $data00 != -76 then
+#sql show tags from st_double_0
+##if $data05 != -76 then
## return -1
##endi
#sql alter table st_double_0 set tag tagname=+0012.871
-#sql select tagname from st_double_0
-##if $data00 != 12 then
+#sql show tags from st_double_0
+##if $data05 != 12 then
## return -1
##endi
#sql alter table st_double_0 set tag tagname=-00063.582
-#sql select tagname from st_double_0
-##if $data00 != -63 then
+#sql show tags from st_double_0
+##if $data05 != -63 then
## return -1
##endi
@@ -438,11 +445,11 @@ sql_error create table st_double_e0 using mt_double tags (-31.7976931348623157e+
#sql_error create table st_double_e0 using mt_double tags (12.80) truncate integer part
#sql_error create table st_double_e0 using mt_double tags (-11.80)
sql_error create table st_double_e0 using mt_double tags (123abc)
-sql_error create table st_double_e0 using mt_double tags ("123abc")
+sql create table st_double_e0_1 using mt_double tags ("123abc")
sql_error create table st_double_e0 using mt_double tags (abc)
-sql_error create table st_double_e0 using mt_double tags ("abc")
-sql_error create table st_double_e0 using mt_double tags (" ")
-sql_error create table st_double_e0 using mt_double tags ('')
+sql create table st_double_e0_2 using mt_double tags ("abc")
+sql create table st_double_e0_3 using mt_double tags (" ")
+sql create table st_double_e0_4 using mt_double tags ('')
sql create table st_double_e0 using mt_double tags (123)
sql create table st_double_e1 using mt_double tags (123)
@@ -469,7 +476,7 @@ sql_error insert into st_double_e7 values (now, "123abc")
sql_error insert into st_double_e9 values (now, abc)
sql_error insert into st_double_e10 values (now, "abc")
sql_error insert into st_double_e11 values (now, " ")
-sql_error insert into st_double_e12 values (now, '')
+sql insert into st_double_e12 values (now, '')
sql_error insert into st_double_e13 using mt_double tags (033) values (now, 11.7976931348623157e+308)
sql_error insert into st_double_e14 using mt_double tags (033) values (now, -11.7976931348623157e+308)
@@ -482,7 +489,7 @@ sql_error insert into st_double_e20 using mt_double tags (033) values (now, "123
sql_error insert into st_double_e22 using mt_double tags (033) values (now, abc)
sql_error insert into st_double_e23 using mt_double tags (033) values (now, "abc")
sql_error insert into st_double_e24 using mt_double tags (033) values (now, " ")
-sql_error insert into st_double_e25 using mt_double tags (033) values (now, '')
+sql insert into st_double_e25_1 using mt_double tags (033) values (now, '')
sql_error insert into st_double_e13 using mt_double tags (31.7976931348623157e+308) values (now, -033)
sql_error insert into st_double_e14 using mt_double tags (-31.7976931348623157e+308) values (now, -033)
@@ -495,7 +502,7 @@ sql_error insert into st_double_e20 using mt_double tags ("123abc") values (now,
sql_error insert into st_double_e22 using mt_double tags (abc) values (now, -033)
sql_error insert into st_double_e23 using mt_double tags ("abc") values (now, -033)
sql_error insert into st_double_e24 using mt_double tags (" ") values (now, -033)
-sql_error insert into st_double_e25 using mt_double tags ('') values (now, -033)
+sql insert into st_double_e25 using mt_double tags ('') values (now, -033)
sql insert into st_double_e13 using mt_double tags (033) values (now, 00062)
sql insert into st_double_e14 using mt_double tags (033) values (now, 00062)
@@ -516,8 +523,8 @@ sql_error alter table st_double_e14 set tag tagname=-1.8976931348623157e+308
sql_error alter table st_double_e15 set tag tagname=131.7976931348623157e+308
sql_error alter table st_double_e16 set tag tagname=-131.7976931348623157e+308
sql_error alter table st_double_e19 set tag tagname=123abc
-sql_error alter table st_double_e20 set tag tagname="123abc"
+sql alter table st_double_e20 set tag tagname="123abc"
sql_error alter table st_double_e22 set tag tagname=abc
-sql_error alter table st_double_e23 set tag tagname="abc"
-sql_error alter table st_double_e24 set tag tagname=" "
-sql_error alter table st_double_e25 set tag tagname=''
+sql alter table st_double_e23 set tag tagname="abc"
+sql alter table st_double_e24 set tag tagname=" "
+sql alter table st_double_e25 set tag tagname=''
diff --git a/tests/script/tsim/parser/columnValue_float.sim b/tests/script/tsim/parser/columnValue_float.sim
index 8fca0d4671..9b0ca4b186 100644
--- a/tests/script/tsim/parser/columnValue_float.sim
+++ b/tests/script/tsim/parser/columnValue_float.sim
@@ -1,5 +1,12 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
sql connect
-sql create database if not exists db
+
+print ========== columnValues.sim
+
+sql drop database if exists db
+sql create database db
sql use db
#### test the value of all data types in four cases: static create table, insert column value, synamic create table, alter tag value
@@ -10,152 +17,152 @@ sql create table mt_float (ts timestamp, c float) tags (tagname float)
## case 00: static create table for test tag values
sql create table st_float_0 using mt_float tags (NULL)
-sql select tagname from st_float_0
-if $data00 != NULL then
+sql show tags from st_float_0
+if $data05 != NULL then
return -1
endi
sql create table st_float_1 using mt_float tags (NULL)
-sql select tagname from st_float_1
-if $data00 != NULL then
+sql show tags from st_float_1
+if $data05 != NULL then
return -1
endi
sql create table st_float_2 using mt_float tags ('NULL')
-sql select tagname from st_float_2
-if $data00 != NULL then
+sql show tags from st_float_2
+if $data05 != 0.00000 then
return -1
endi
sql create table st_float_3 using mt_float tags ('NULL')
-sql select tagname from st_float_3
-if $data00 != NULL then
+sql show tags from st_float_3
+if $data05 != 0.00000 then
return -1
endi
sql create table st_float_4 using mt_float tags ("NULL")
-sql select tagname from st_float_4
-if $data00 != NULL then
+sql show tags from st_float_4
+if $data05 != 0.00000 then
return -1
endi
sql create table st_float_5 using mt_float tags ("NULL")
-sql select tagname from st_float_5
-if $data00 != NULL then
+sql show tags from st_float_5
+if $data05 != 0.00000 then
return -1
endi
sql create table st_float_6 using mt_float tags (-123.321)
-sql select tagname from st_float_6
-if $data00 != -123.32100 then
- print expect -123.32100, actual: $data00
+sql show tags from st_float_6
+if $data05 != -123.32100 then
+ print expect -123.32100, actual: $data05
return -1
endi
sql create table st_float_7 using mt_float tags (+1.567)
-sql select tagname from st_float_7
-if $data00 != 1.56700 then
- print expect 1.56700, actual: $data00
+sql show tags from st_float_7
+if $data05 != 1.56700 then
+ print expect 1.56700, actual: $data05
return -1
endi
sql create table st_float_8 using mt_float tags (379.001)
-sql select tagname from st_float_8
-if $data00 != 379.00101 then
- print expect 379.00101, actual: $data00
+sql show tags from st_float_8
+if $data05 != 379.00101 then
+ print expect 379.00101, actual: $data05
return -1
endi
sql create table st_float_9 using mt_float tags (1.5e+3)
-sql select tagname from st_float_9
-if $data00 != 1500.00000 then
- print expect 1500.00000, actual: $data00
+sql show tags from st_float_9
+if $data05 != 1500.00000 then
+ print expect 1500.00000, actual: $data05
return -1
endi
sql create table st_float_10 using mt_float tags (-1.5e-3)
-sql select tagname from st_float_10
-if $data00 != -0.00150 then
- print expect -0.00150, actual: $data00
+sql show tags from st_float_10
+if $data05 != -0.00150 then
+ print expect -0.00150, actual: $data05
return -1
endi
sql create table st_float_11 using mt_float tags (+1.5e+3)
-sql select tagname from st_float_11
-if $data00 != 1500.00000 then
- print expect 1500.00000, actual: $data00
+sql show tags from st_float_11
+if $data05 != 1500.00000 then
+ print expect 1500.00000, actual: $data05
return -1
endi
sql create table st_float_12 using mt_float tags (-1.5e+3)
-sql select tagname from st_float_12
-if $data00 != -1500.00000 then
- print expect -1500.00000, actual: $data00
+sql show tags from st_float_12
+if $data05 != -1500.00000 then
+ print expect -1500.00000, actual: $data05
return -1
endi
sql create table st_float_13 using mt_float tags (1.5e-3)
-sql select tagname from st_float_13
-if $data00 != 0.00150 then
- print expect 0.00150, actual: $data00
+sql show tags from st_float_13
+if $data05 != 0.00150 then
+ print expect 0.00150, actual: $data05
return -1
endi
sql create table st_float_14 using mt_float tags (1.5E-3)
-sql select tagname from st_float_14
-if $data00 != 0.00150 then
- print expect 0.00150, actual: $data00
+sql show tags from st_float_14
+if $data05 != 0.00150 then
+ print expect 0.00150, actual: $data05
return -1
endi
sql create table st_float_6_0 using mt_float tags ('-123.321')
-sql select tagname from st_float_6_0
-if $data00 != -123.32100 then
- print expect -123.32100, actual: $data00
+sql show tags from st_float_6_0
+if $data05 != -123.32100 then
+ print expect -123.32100, actual: $data05
return -1
endi
sql create table st_float_7_0 using mt_float tags ('+1.567')
-sql select tagname from st_float_7_0
-if $data00 != 1.56700 then
- print expect 1.56700, actual: $data00
+sql show tags from st_float_7_0
+if $data05 != 1.56700 then
+ print expect 1.56700, actual: $data05
return -1
endi
sql create table st_float_8_0 using mt_float tags ('379.001')
-sql select tagname from st_float_8_0
-if $data00 != 379.00101 then
- print expect 379.00101, actual: $data00
+sql show tags from st_float_8_0
+if $data05 != 379.00101 then
+ print expect 379.00101, actual: $data05
return -1
endi
sql create table st_float_9_0 using mt_float tags ('1.5e+3')
-sql select tagname from st_float_9_0
-if $data00 != 1500.00000 then
- print expect 1500.00000, actual: $data00
+sql show tags from st_float_9_0
+if $data05 != 1500.00000 then
+ print expect 1500.00000, actual: $data05
return -1
endi
sql create table st_float_10_0 using mt_float tags ('-1.5e-3')
-sql select tagname from st_float_10_0
-if $data00 != -0.00150 then
- print expect -0.00150, actual: $data00
+sql show tags from st_float_10_0
+if $data05 != -0.00150 then
+ print expect -0.00150, actual: $data05
return -1
endi
sql create table st_float_11_0 using mt_float tags ('+1.5e+3')
-sql select tagname from st_float_11_0
-if $data00 != 1500.00000 then
- print expect 1500.00000, actual: $data00
+sql show tags from st_float_11_0
+if $data05 != 1500.00000 then
+ print expect 1500.00000, actual: $data05
return -1
endi
sql create table st_float_12_0 using mt_float tags ('-1.5e+3')
-sql select tagname from st_float_12_0
-if $data00 != -1500.00000 then
- print expect -1500.00000, actual: $data00
+sql show tags from st_float_12_0
+if $data05 != -1500.00000 then
+ print expect -1500.00000, actual: $data05
return -1
endi
sql create table st_float_13_0 using mt_float tags ('1.5e-3')
-sql select tagname from st_float_13_0
-if $data00 != 0.00150 then
- print expect 0.00150, actual: $data00
+sql show tags from st_float_13_0
+if $data05 != 0.00150 then
+ print expect 0.00150, actual: $data05
return -1
endi
sql create table st_float_14_0 using mt_float tags ('1.5E-3')
-sql select tagname from st_float_14_0
-if $data00 != 0.00150 then
- print expect 0.00150, actual: $data00
+sql show tags from st_float_14_0
+if $data05 != 0.00150 then
+ print expect 0.00150, actual: $data05
return -1
endi
#sql create table st_float_15_0 using mt_float tags (3.40282347e+38)
-#sql select tagname from st_float_15_0
-#if $data00 != 0.001500 then
+#sql show tags from st_float_15_0
+#if $data05 != 0.001500 then
# return -1
#endi
#sql create table st_float_16_0 using mt_float tags (-3.40282347e+38)
-#sql select tagname from st_float_16_0
-#if $data00 != 0.001500 then
+#sql show tags from st_float_16_0
+#if $data05 != 0.001500 then
# return -1
#endi
@@ -292,8 +299,8 @@ endi
## case 02: dynamic create table for test tag values
sql insert into st_float_16 using mt_float tags (NULL) values (now, NULL)
-sql select tagname from st_float_16
-if $data00 != NULL then
+sql show tags from st_float_16
+if $data05 != NULL then
return -1
endi
sql select * from st_float_16
@@ -302,8 +309,8 @@ if $data01 != NULL then
endi
sql insert into st_float_17 using mt_float tags (NULL) values (now, NULL)
-sql select tagname from st_float_17
-if $data00 != NULL then
+sql show tags from st_float_17
+if $data05 != NULL then
return -1
endi
sql select * from st_float_17
@@ -311,8 +318,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_float_18 using mt_float tags ('NULL') values (now, 'NULL')
-sql select tagname from st_float_18
-if $data00 != NULL then
+sql show tags from st_float_18
+if $data05 != NULL then
return -1
endi
sql select * from st_float_18
@@ -320,8 +327,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_float_19 using mt_float tags ('NULL') values (now, 'NULL')
-sql select tagname from st_float_19
-if $data00 != NULL then
+sql show tags from st_float_19
+if $data05 != NULL then
return -1
endi
sql select * from st_float_19
@@ -329,8 +336,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_float_20 using mt_float tags ("NULL") values (now, "NULL")
-sql select tagname from st_float_20
-if $data00 != NULL then
+sql show tags from st_float_20
+if $data05 != NULL then
return -1
endi
sql select * from st_float_20
@@ -338,8 +345,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_float_21 using mt_float tags ("NULL") values (now, "NULL")
-sql select tagname from st_float_21
-if $data00 != NULL then
+sql show tags from st_float_21
+if $data05 != NULL then
return -1
endi
sql select * from st_float_21
@@ -350,9 +357,9 @@ endi
sql_error insert into st_float_22 using mt_float tags (127) values (now, 3.40282347e+38)
sql insert into st_float_22 using mt_float tags (127) values (now, 340282346638528859811704183484516925440.00000)
-sql select tagname from st_float_22
-if $data00 != 127.00000 then
- print expect 127.00000, actual: $data00
+sql show tags from st_float_22
+if $data05 != 127.00000 then
+ print expect 127.00000, actual: $data05
return -1
endi
@@ -362,14 +369,14 @@ if $data01 != 127.00000 then
endi
sql insert into st_float_23 using mt_float tags (-127) values (now, -340282346638528859811704183484516925440.00000)
-sql select tagname from st_float_23
-if $data00 != -127.00000 then
+sql show tags from st_float_23
+if $data05 != -127.00000 then
return -1
endi
sql insert into st_float_24 using mt_float tags (10) values (now, 10)
-sql select tagname from st_float_24
-if $data00 != 10.00000 then
+sql show tags from st_float_24
+if $data05 != 10.00000 then
return -1
endi
sql select * from st_float_24
@@ -378,9 +385,9 @@ if $data01 != 10.00000 then
endi
sql insert into st_float_25 using mt_float tags ("-0") values (now, "-0")
-sql select tagname from st_float_25
-if $data00 != -0.00000 then
- print expect -0.00000, actual: $data00
+sql show tags from st_float_25
+if $data05 != -0.00000 then
+ print expect -0.00000, actual: $data05
return -1
endi
sql select * from st_float_25
@@ -388,9 +395,9 @@ if $data01 != -0.00000 then
return -1
endi
sql insert into st_float_26 using mt_float tags ('123') values (now, '12.3')
-sql select tagname from st_float_26
-if $data00 != 123.00000 then
- print expect 123.00000, actual: $data00
+sql show tags from st_float_26
+if $data05 != 123.00000 then
+ print expect 123.00000, actual: $data05
return -1
endi
sql select * from st_float_26
@@ -398,9 +405,9 @@ if $data01 != 12.30000 then
return -1
endi
sql insert into st_float_27 using mt_float tags (+056) values (now, +0005.6)
-sql select tagname from st_float_27
-if $data00 != 56.00000 then
- print expect 56.00000, actual:$data00
+sql show tags from st_float_27
+if $data05 != 56.00000 then
+ print expect 56.00000, actual:$data05
return -1
endi
sql select * from st_float_27
@@ -408,8 +415,8 @@ if $data01 != 5.60000 then
return -1
endi
sql insert into st_float_28 using mt_float tags (-056) values (now, -005.6)
-sql select tagname from st_float_28
-if $data00 != -56.00000 then
+sql show tags from st_float_28
+if $data05 != -56.00000 then
return -1
endi
sql select * from st_float_28
@@ -419,44 +426,44 @@ endi
### case 03: alter tag values
sql alter table st_float_0 set tag tagname=340282346638528859811704183484516925440.00000
-sql select tagname from st_float_0
-if $data00 != 340282346638528859811704183484516925440.00000 then
+sql show tags from st_float_0
+if $data05 != 340282346638528859811704183484516925440.00000 then
return -1
endi
sql alter table st_float_0 set tag tagname=-340282346638528859811704183484516925440.00000
-sql select tagname from st_float_0
-if $data00 != -340282346638528859811704183484516925440.00000 then
+sql show tags from st_float_0
+if $data05 != -340282346638528859811704183484516925440.00000 then
return -1
endi
sql alter table st_float_0 set tag tagname=+10.340
-sql select tagname from st_float_0
-if $data00 != 10.34000 then
+sql show tags from st_float_0
+if $data05 != 10.34000 then
return -1
endi
sql alter table st_float_0 set tag tagname=-33.87
-sql select tagname from st_float_0
-if $data00 != -33.87000 then
+sql show tags from st_float_0
+if $data05 != -33.87000 then
return -1
endi
sql alter table st_float_0 set tag tagname='+9.8'
-sql select tagname from st_float_0
-if $data00 != 9.80000 then
+sql show tags from st_float_0
+if $data05 != 9.80000 then
return -1
endi
sql alter table st_float_0 set tag tagname='-07.6'
-sql select tagname from st_float_0
-if $data00 != -7.60000 then
+sql show tags from st_float_0
+if $data05 != -7.60000 then
return -1
endi
sql alter table st_float_0 set tag tagname=+0012.871
-sql select tagname from st_float_0
-if $data00 != 12.87100 then
+sql show tags from st_float_0
+if $data05 != 12.87100 then
return -1
endi
sql alter table st_float_0 set tag tagname=-00063.582
-sql select tagname from st_float_0
-if $data00 != -63.58200 then
+sql show tags from st_float_0
+if $data05 != -63.58200 then
return -1
endi
@@ -468,11 +475,11 @@ sql_error create table st_float_e0 using mt_float tags (-333.40282347e+38)
#sql_error create table st_float_e0 using mt_float tags (12.80) truncate integer part
#sql_error create table st_float_e0 using mt_float tags (-11.80)
sql_error create table st_float_e0 using mt_float tags (123abc)
-sql_error create table st_float_e0 using mt_float tags ("123abc")
+sql create table st_float_e0_1 using mt_float tags ("123abc")
sql_error create table st_float_e0 using mt_float tags (abc)
-sql_error create table st_float_e0 using mt_float tags ("abc")
-sql_error create table st_float_e0 using mt_float tags (" ")
-sql_error create table st_float_e0 using mt_float tags ('')
+sql create table st_float_e0_2 using mt_float tags ("abc")
+sql create table st_float_e0_3 using mt_float tags (" ")
+sql create table st_float_e0_4 using mt_float tags ('')
sql create table st_float_e0 using mt_float tags (123)
sql create table st_float_e1 using mt_float tags (123)
@@ -499,7 +506,7 @@ sql_error insert into st_float_e7 values (now, "123abc")
sql_error insert into st_float_e9 values (now, abc)
sql_error insert into st_float_e10 values (now, "abc")
sql_error insert into st_float_e11 values (now, " ")
-sql_error insert into st_float_e12 values (now, '')
+sql insert into st_float_e12 values (now, '')
sql_error insert into st_float_e13 using mt_float tags (033) values (now, 3.50282347e+38)
sql_error insert into st_float_e14 using mt_float tags (033) values (now, -3.50282347e+38)
@@ -512,7 +519,7 @@ sql_error insert into st_float_e20 using mt_float tags (033) values (now, "123ab
sql_error insert into st_float_e22 using mt_float tags (033) values (now, abc)
sql_error insert into st_float_e23 using mt_float tags (033) values (now, "abc")
sql_error insert into st_float_e24 using mt_float tags (033) values (now, " ")
-sql_error insert into st_float_e25 using mt_float tags (033) values (now, '')
+sql insert into st_float_e25_1 using mt_float tags (033) values (now, '')
sql_error insert into st_float_e13 using mt_float tags (3.50282347e+38) values (now, -033)
sql_error insert into st_float_e14 using mt_float tags (-3.50282347e+38) values (now, -033)
@@ -525,7 +532,7 @@ sql_error insert into st_float_e20 using mt_float tags ("123abc") values (now, -
sql_error insert into st_float_e22 using mt_float tags (abc) values (now, -033)
sql_error insert into st_float_e23 using mt_float tags ("abc") values (now, -033)
sql_error insert into st_float_e24 using mt_float tags (" ") values (now, -033)
-sql_error insert into st_float_e25 using mt_float tags ('') values (now, -033)
+sql insert into st_float_e25_3 using mt_float tags ('') values (now, -033)
sql insert into st_float_e13 using mt_float tags (033) values (now, 00062)
sql insert into st_float_e14 using mt_float tags (033) values (now, 00062)
@@ -546,8 +553,8 @@ sql_error alter table st_float_e14 set tag tagname=-3.50282347e+38
sql_error alter table st_float_e15 set tag tagname=13.40282347e+38
sql_error alter table st_float_e16 set tag tagname=-13.40282347e+38
sql_error alter table st_float_e19 set tag tagname=123abc
-sql_error alter table st_float_e20 set tag tagname="123abc"
+sql alter table st_float_e20 set tag tagname="123abc"
sql_error alter table st_float_e22 set tag tagname=abc
-sql_error alter table st_float_e23 set tag tagname="abc"
-sql_error alter table st_float_e24 set tag tagname=" "
-sql_error alter table st_float_e25 set tag tagname=''
+sql alter table st_float_e23 set tag tagname="abc"
+sql alter table st_float_e24 set tag tagname=" "
+sql alter table st_float_e25 set tag tagname=''
diff --git a/tests/script/tsim/parser/columnValue_int.sim b/tests/script/tsim/parser/columnValue_int.sim
index 66be28ef89..48d95f5ecb 100644
--- a/tests/script/tsim/parser/columnValue_int.sim
+++ b/tests/script/tsim/parser/columnValue_int.sim
@@ -1,5 +1,12 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
sql connect
-sql create database if not exists db
+
+print ========== columnValue_int.sim
+
+sql drop database if exists db
+sql create database db
sql use db
#### test the value of all data types in four cases: static create table, insert column value, synamic create table, alter tag value
@@ -10,78 +17,64 @@ sql create table mt_int (ts timestamp, c int) tags (tagname int)
## case 00: static create table for test tag values
sql create table st_int_0 using mt_int tags (NULL)
-sql select tagname from st_int_0
-if $data00 != NULL then
+sql show tags from st_int_0
+if $data05 != NULL then
return -1
endi
sql create table st_int_1 using mt_int tags (NULL)
-sql select tagname from st_int_1
-if $data00 != NULL then
- return -1
-endi
-sql create table st_int_2 using mt_int tags ('NULL')
-sql select tagname from st_int_2
-if $data00 != NULL then
- return -1
-endi
-sql create table st_int_3 using mt_int tags ('NULL')
-sql select tagname from st_int_3
-if $data00 != NULL then
- return -1
-endi
-sql create table st_int_4 using mt_int tags ("NULL")
-sql select tagname from st_int_4
-if $data00 != NULL then
- return -1
-endi
-sql create table st_int_5 using mt_int tags ("NULL")
-sql select tagname from st_int_5
-if $data00 != NULL then
+sql show tags from st_int_1
+if $data05 != NULL then
return -1
endi
+
+sql_error create table st_int_2 using mt_int tags ('NULL')
+sql_error create table st_int_3 using mt_int tags ('NULL')
+sql_error create table st_int_4 using mt_int tags ("NULL")
+sql_error create table st_int_5 using mt_int tags ("NULL")
+
sql create table st_int_6 using mt_int tags (-2147483647)
-sql select tagname from st_int_6
-if $data00 != -2147483647 then
+sql show tags from st_int_6
+if $data05 != -2147483647 then
return -1
endi
sql create table st_int_7 using mt_int tags (2147483647)
-sql select tagname from st_int_7
-if $data00 != 2147483647 then
+sql show tags from st_int_7
+if $data05 != 2147483647 then
return -1
endi
sql create table st_int_8 using mt_int tags (37)
-sql select tagname from st_int_8
-if $data00 != 37 then
+sql show tags from st_int_8
+if $data05 != 37 then
return -1
endi
sql create table st_int_9 using mt_int tags (-100)
-sql select tagname from st_int_9
-if $data00 != -100 then
+sql show tags from st_int_9
+if $data05 != -100 then
return -1
endi
sql create table st_int_10 using mt_int tags (+113)
-sql select tagname from st_int_10
-if $data00 != 113 then
+sql show tags from st_int_10
+if $data05 != 113 then
return -1
endi
sql create table st_int_11 using mt_int tags ('-100')
-sql select tagname from st_int_11
-if $data00 != -100 then
+sql show tags from st_int_11
+if $data05 != -100 then
return -1
endi
sql create table st_int_12 using mt_int tags ("+78")
-sql select tagname from st_int_12
-if $data00 != 78 then
+sql show tags from st_int_12
+if $data05 != 78 then
return -1
endi
sql create table st_int_13 using mt_int tags (+0078)
-sql select tagname from st_int_13
-if $data00 != 78 then
+sql show tags from st_int_13
+if $data05 != 78 then
return -1
endi
sql create table st_int_14 using mt_int tags (-00078)
-sql select tagname from st_int_14
-if $data00 != -78 then
+sql show tags from st_int_14
+if $data05 != -78 then
return -1
endi
@@ -102,38 +95,6 @@ endi
if $data01 != NULL then
return -1
endi
-sql insert into st_int_2 values (now, 'NULL')
-sql select * from st_int_2
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_int_3 values (now, 'NULL')
-sql select * from st_int_3
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_int_4 values (now, "NULL")
-sql select * from st_int_4
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_int_5 values (now, "NULL")
-sql select * from st_int_5
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
sql insert into st_int_6 values (now, 2147483647)
sql select * from st_int_6
if $rows != 1 then
@@ -211,8 +172,8 @@ endi
## case 02: dynamic create table for test tag values
sql insert into st_int_16 using mt_int tags (NULL) values (now, NULL)
-sql select tagname from st_int_16
-if $data00 != NULL then
+sql show tags from st_int_16
+if $data05 != NULL then
return -1
endi
sql select * from st_int_16
@@ -221,8 +182,8 @@ if $data01 != NULL then
endi
sql insert into st_int_17 using mt_int tags (NULL) values (now, NULL)
-sql select tagname from st_int_17
-if $data00 != NULL then
+sql show tags from st_int_17
+if $data05 != NULL then
return -1
endi
sql select * from st_int_17
@@ -230,8 +191,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_int_18 using mt_int tags ('NULL') values (now, 'NULL')
-sql select tagname from st_int_18
-if $data00 != NULL then
+sql show tags from st_int_18
+if $data05 != NULL then
return -1
endi
sql select * from st_int_18
@@ -239,8 +200,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_int_19 using mt_int tags ('NULL') values (now, 'NULL')
-sql select tagname from st_int_19
-if $data00 != NULL then
+sql show tags from st_int_19
+if $data05 != NULL then
return -1
endi
sql select * from st_int_19
@@ -248,8 +209,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_int_20 using mt_int tags ("NULL") values (now, "NULL")
-sql select tagname from st_int_20
-if $data00 != NULL then
+sql show tags from st_int_20
+if $data05 != NULL then
return -1
endi
sql select * from st_int_20
@@ -257,8 +218,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_int_21 using mt_int tags ("NULL") values (now, "NULL")
-sql select tagname from st_int_21
-if $data00 != NULL then
+sql show tags from st_int_21
+if $data05 != NULL then
return -1
endi
sql select * from st_int_21
@@ -266,8 +227,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_int_22 using mt_int tags (2147483647) values (now, 2147483647)
-sql select tagname from st_int_22
-if $data00 != 2147483647 then
+sql show tags from st_int_22
+if $data05 != 2147483647 then
return -1
endi
sql select * from st_int_22
@@ -275,8 +236,8 @@ if $data01 != 2147483647 then
return -1
endi
sql insert into st_int_23 using mt_int tags (-2147483647) values (now, -2147483647)
-sql select tagname from st_int_23
-if $data00 != -2147483647 then
+sql show tags from st_int_23
+if $data05 != -2147483647 then
return -1
endi
sql select * from st_int_23
@@ -284,8 +245,8 @@ if $data01 != -2147483647 then
return -1
endi
sql insert into st_int_24 using mt_int tags (10) values (now, 10)
-sql select tagname from st_int_24
-if $data00 != 10 then
+sql show tags from st_int_24
+if $data05 != 10 then
return -1
endi
sql select * from st_int_24
@@ -293,8 +254,8 @@ if $data01 != 10 then
return -1
endi
sql insert into st_int_25 using mt_int tags ("-0") values (now, "-0")
-sql select tagname from st_int_25
-if $data00 != 0 then
+sql show tags from st_int_25
+if $data05 != 0 then
return -1
endi
sql select * from st_int_25
@@ -302,8 +263,8 @@ if $data01 != 0 then
return -1
endi
sql insert into st_int_26 using mt_int tags ('123') values (now, '123')
-sql select tagname from st_int_26
-if $data00 != 123 then
+sql show tags from st_int_26
+if $data05 != 123 then
return -1
endi
sql select * from st_int_26
@@ -311,8 +272,8 @@ if $data01 != 123 then
return -1
endi
sql insert into st_int_27 using mt_int tags (+056) values (now, +00056)
-sql select tagname from st_int_27
-if $data00 != 56 then
+sql show tags from st_int_27
+if $data05 != 56 then
return -1
endi
sql select * from st_int_27
@@ -320,8 +281,8 @@ if $data01 != 56 then
return -1
endi
sql insert into st_int_28 using mt_int tags (-056) values (now, -0056)
-sql select tagname from st_int_28
-if $data00 != -56 then
+sql show tags from st_int_28
+if $data05 != -56 then
return -1
endi
sql select * from st_int_28
@@ -331,49 +292,49 @@ endi
### case 03: alter tag values
#sql alter table st_int_0 set tag tagname=2147483647
-#sql select tagname from st_int_0
-#if $data00 != 2147483647 then
+#sql show tags from st_int_0
+#if $data05 != 2147483647 then
# return -1
#endi
#sql alter table st_int_0 set tag tagname=-2147483647
-#sql select tagname from st_int_0
-#if $data00 != -2147483647 then
+#sql show tags from st_int_0
+#if $data05 != -2147483647 then
# return -1
#endi
#sql alter table st_int_0 set tag tagname=+100
-#sql select tagname from st_int_0
-#if $data00 != 100 then
+#sql show tags from st_int_0
+#if $data05 != 100 then
# return -1
#endi
#sql alter table st_int_0 set tag tagname=-33
-#sql select tagname from st_int_0
-#if $data00 != -33 then
+#sql show tags from st_int_0
+#if $data05 != -33 then
# return -1
#endi
#sql alter table st_int_0 set tag tagname='+98'
-#sql select tagname from st_int_0
-#if $data00 != 98 then
+#sql show tags from st_int_0
+#if $data05 != 98 then
# return -1
#endi
#sql alter table st_int_0 set tag tagname='-076'
-#sql select tagname from st_int_0
-#if $data00 != -76 then
+#sql show tags from st_int_0
+#if $data05 != -76 then
# return -1
#endi
#sql alter table st_int_0 set tag tagname=+0012
-#sql select tagname from st_int_0
-#if $data00 != 12 then
+#sql show tags from st_int_0
+#if $data05 != 12 then
# return -1
#endi
#sql alter table st_int_0 set tag tagname=-00063
-#sql select tagname from st_int_0
-#if $data00 != -63 then
+#sql show tags from st_int_0
+#if $data05 != -63 then
# return -1
#endi
## case 04: illegal input
sql_error create table st_int_e0 using mt_int tags (2147483648)
-sql_error create table st_int_e0 using mt_int tags (-2147483648)
+sql create table st_int_e0_err1 using mt_int tags (-2147483648)
sql_error create table st_int_e0 using mt_int tags (214748364800)
sql_error create table st_int_e0 using mt_int tags (-214748364800)
#sql_error create table st_int_e0 using mt_int tags (12.80) truncate integer part
@@ -383,7 +344,7 @@ sql_error create table st_int_e0 using mt_int tags ("123abc")
sql_error create table st_int_e0 using mt_int tags (abc)
sql_error create table st_int_e0 using mt_int tags ("abc")
sql_error create table st_int_e0 using mt_int tags (" ")
-sql_error create table st_int_e0 using mt_int tags ('')
+sql create table st_int_e0_err2 using mt_int tags ('')
sql create table st_int_e0 using mt_int tags (123)
sql create table st_int_e1 using mt_int tags (123)
@@ -400,7 +361,7 @@ sql create table st_int_e11 using mt_int tags (123)
sql create table st_int_e12 using mt_int tags (123)
sql_error insert into st_int_e0 values (now, 2147483648)
-sql_error insert into st_int_e1 values (now, -2147483648)
+sql insert into st_int_e1 values (now, -2147483648)
sql_error insert into st_int_e2 values (now, 3147483648)
sql_error insert into st_int_e3 values (now, -21474836481)
#sql_error insert into st_int_e4 values (now, 12.80)
@@ -410,10 +371,10 @@ sql_error insert into st_int_e7 values (now, "123abc")
sql_error insert into st_int_e9 values (now, abc)
sql_error insert into st_int_e10 values (now, "abc")
sql_error insert into st_int_e11 values (now, " ")
-sql_error insert into st_int_e12 values (now, '')
+sql insert into st_int_e12 values (now, '')
sql_error insert into st_int_e13 using mt_int tags (033) values (now, 2147483648)
-sql_error insert into st_int_e14 using mt_int tags (033) values (now, -2147483648)
+sql insert into st_int_e14 using mt_int tags (033) values (now, -2147483648)
sql_error insert into st_int_e15 using mt_int tags (033) values (now, 5147483648)
sql_error insert into st_int_e16 using mt_int tags (033) values (now, -21474836481)
#sql_error insert into st_int_e17 using mt_int tags (033) values (now, 12.80)
@@ -423,10 +384,10 @@ sql_error insert into st_int_e20 using mt_int tags (033) values (now, "123abc")
sql_error insert into st_int_e22 using mt_int tags (033) values (now, abc)
sql_error insert into st_int_e23 using mt_int tags (033) values (now, "abc")
sql_error insert into st_int_e24 using mt_int tags (033) values (now, " ")
-sql_error insert into st_int_e25 using mt_int tags (033) values (now, '')
+sql insert into st_int_e25 using mt_int tags (033) values (now, '')
sql_error insert into st_int_e13 using mt_int tags (2147483648) values (now, -033)
-sql_error insert into st_int_e14 using mt_int tags (-2147483648) values (now, -033)
+sql insert into st_int_e14_1 using mt_int tags (-2147483648) values (now, -033)
sql_error insert into st_int_e15 using mt_int tags (21474836480) values (now, -033)
sql_error insert into st_int_e16 using mt_int tags (-2147483649) values (now, -033)
#sql_error insert into st_int_e17 using mt_int tags (12.80) values (now, -033)
@@ -436,7 +397,7 @@ sql_error insert into st_int_e20 using mt_int tags ("123abc") values (now, -033)
sql_error insert into st_int_e22 using mt_int tags (abc) values (now, -033)
sql_error insert into st_int_e23 using mt_int tags ("abc") values (now, -033)
sql_error insert into st_int_e24 using mt_int tags (" ") values (now, -033)
-sql_error insert into st_int_e25 using mt_int tags ('') values (now, -033)
+sql insert into st_int_e25_1 using mt_int tags ('') values (now, -033)
sql insert into st_int_e13 using mt_int tags (033) values (now, 00062)
sql insert into st_int_e14 using mt_int tags (033) values (now, 00062)
@@ -453,7 +414,7 @@ sql insert into st_int_e24 using mt_int tags (033) values (now, 00062)
sql insert into st_int_e25 using mt_int tags (033) values (now, 00062)
sql_error alter table st_int_e13 set tag tagname=2147483648
-sql_error alter table st_int_e14 set tag tagname=-2147483648
+sql alter table st_int_e14 set tag tagname=-2147483648
sql_error alter table st_int_e15 set tag tagname=12147483648
sql_error alter table st_int_e16 set tag tagname=-3147483648
sql_error alter table st_int_e19 set tag tagname=123abc
@@ -461,4 +422,4 @@ sql_error alter table st_int_e20 set tag tagname="123abc"
sql_error alter table st_int_e22 set tag tagname=abc
sql_error alter table st_int_e23 set tag tagname="abc"
sql_error alter table st_int_e24 set tag tagname=" "
-sql_error alter table st_int_e25 set tag tagname=''
+sql alter table st_int_e25 set tag tagname=''
diff --git a/tests/script/tsim/parser/columnValue_smallint.sim b/tests/script/tsim/parser/columnValue_smallint.sim
index 6608b6cea4..ced486ba0b 100644
--- a/tests/script/tsim/parser/columnValue_smallint.sim
+++ b/tests/script/tsim/parser/columnValue_smallint.sim
@@ -1,6 +1,13 @@
-sql create database if not exists db
-sql use db
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+print ========== columnValue_smallint.sim
+
+sql drop database if exists db
+sql create database db
+sql use db
#### test the value of all data types in four cases: static create table, insert column value, synamic create table, alter tag value
######## case 0: smallint
@@ -9,78 +20,64 @@ sql create table mt_smallint (ts timestamp, c smallint) tags (tagname smallint)
## case 00: static create table for test tag values
sql create table st_smallint_0 using mt_smallint tags (NULL)
-sql select tagname from st_smallint_0
-if $data00 != NULL then
+sql show tags from st_smallint_0
+if $data05 != NULL then
return -1
endi
sql create table st_smallint_1 using mt_smallint tags (NULL)
-sql select tagname from st_smallint_1
-if $data00 != NULL then
- return -1
-endi
-sql create table st_smallint_2 using mt_smallint tags ('NULL')
-sql select tagname from st_smallint_2
-if $data00 != NULL then
- return -1
-endi
-sql create table st_smallint_3 using mt_smallint tags ('NULL')
-sql select tagname from st_smallint_3
-if $data00 != NULL then
- return -1
-endi
-sql create table st_smallint_4 using mt_smallint tags ("NULL")
-sql select tagname from st_smallint_4
-if $data00 != NULL then
- return -1
-endi
-sql create table st_smallint_5 using mt_smallint tags ("NULL")
-sql select tagname from st_smallint_5
-if $data00 != NULL then
+sql show tags from st_smallint_1
+if $data05 != NULL then
return -1
endi
+
+sql_error create table st_smallint_2 using mt_smallint tags ('NULL')
+sql_error create table st_smallint_3 using mt_smallint tags ('NULL')
+sql_error create table st_smallint_4 using mt_smallint tags ("NULL")
+sql_error create table st_smallint_5 using mt_smallint tags ("NULL")
+
sql create table st_smallint_6 using mt_smallint tags (-32767)
-sql select tagname from st_smallint_6
-if $data00 != -32767 then
+sql show tags from st_smallint_6
+if $data05 != -32767 then
return -1
endi
sql create table st_smallint_7 using mt_smallint tags (32767)
-sql select tagname from st_smallint_7
-if $data00 != 32767 then
+sql show tags from st_smallint_7
+if $data05 != 32767 then
return -1
endi
sql create table st_smallint_8 using mt_smallint tags (37)
-sql select tagname from st_smallint_8
-if $data00 != 37 then
+sql show tags from st_smallint_8
+if $data05 != 37 then
return -1
endi
sql create table st_smallint_9 using mt_smallint tags (-100)
-sql select tagname from st_smallint_9
-if $data00 != -100 then
+sql show tags from st_smallint_9
+if $data05 != -100 then
return -1
endi
sql create table st_smallint_10 using mt_smallint tags (+113)
-sql select tagname from st_smallint_10
-if $data00 != 113 then
+sql show tags from st_smallint_10
+if $data05 != 113 then
return -1
endi
sql create table st_smallint_11 using mt_smallint tags ('-100')
-sql select tagname from st_smallint_11
-if $data00 != -100 then
+sql show tags from st_smallint_11
+if $data05 != -100 then
return -1
endi
sql create table st_smallint_12 using mt_smallint tags ("+78")
-sql select tagname from st_smallint_12
-if $data00 != 78 then
+sql show tags from st_smallint_12
+if $data05 != 78 then
return -1
endi
sql create table st_smallint_13 using mt_smallint tags (+0078)
-sql select tagname from st_smallint_13
-if $data00 != 78 then
+sql show tags from st_smallint_13
+if $data05 != 78 then
return -1
endi
sql create table st_smallint_14 using mt_smallint tags (-00078)
-sql select tagname from st_smallint_14
-if $data00 != -78 then
+sql show tags from st_smallint_14
+if $data05 != -78 then
return -1
endi
@@ -101,38 +98,6 @@ endi
if $data01 != NULL then
return -1
endi
-sql insert into st_smallint_2 values (now, 'NULL')
-sql select * from st_smallint_2
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_smallint_3 values (now, 'NULL')
-sql select * from st_smallint_3
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_smallint_4 values (now, "NULL")
-sql select * from st_smallint_4
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_smallint_5 values (now, "NULL")
-sql select * from st_smallint_5
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
sql insert into st_smallint_6 values (now, 32767)
sql select * from st_smallint_6
if $rows != 1 then
@@ -210,8 +175,8 @@ endi
## case 02: dynamic create table for test tag values
sql insert into st_smallint_16 using mt_smallint tags (NULL) values (now, NULL)
-sql select tagname from st_smallint_16
-if $data00 != NULL then
+sql show tags from st_smallint_16
+if $data05 != NULL then
return -1
endi
sql select * from st_smallint_16
@@ -220,8 +185,8 @@ if $data01 != NULL then
endi
sql insert into st_smallint_17 using mt_smallint tags (NULL) values (now, NULL)
-sql select tagname from st_smallint_17
-if $data00 != NULL then
+sql show tags from st_smallint_17
+if $data05 != NULL then
return -1
endi
sql select * from st_smallint_17
@@ -229,8 +194,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_smallint_18 using mt_smallint tags ('NULL') values (now, 'NULL')
-sql select tagname from st_smallint_18
-if $data00 != NULL then
+sql show tags from st_smallint_18
+if $data05 != NULL then
return -1
endi
sql select * from st_smallint_18
@@ -238,8 +203,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_smallint_19 using mt_smallint tags ('NULL') values (now, 'NULL')
-sql select tagname from st_smallint_19
-if $data00 != NULL then
+sql show tags from st_smallint_19
+if $data05 != NULL then
return -1
endi
sql select * from st_smallint_19
@@ -247,8 +212,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_smallint_20 using mt_smallint tags ("NULL") values (now, "NULL")
-sql select tagname from st_smallint_20
-if $data00 != NULL then
+sql show tags from st_smallint_20
+if $data05 != NULL then
return -1
endi
sql select * from st_smallint_20
@@ -256,8 +221,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_smallint_21 using mt_smallint tags ("NULL") values (now, "NULL")
-sql select tagname from st_smallint_21
-if $data00 != NULL then
+sql show tags from st_smallint_21
+if $data05 != NULL then
return -1
endi
sql select * from st_smallint_21
@@ -265,8 +230,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_smallint_22 using mt_smallint tags (32767) values (now, 32767)
-sql select tagname from st_smallint_22
-if $data00 != 32767 then
+sql show tags from st_smallint_22
+if $data05 != 32767 then
return -1
endi
sql select * from st_smallint_22
@@ -274,8 +239,8 @@ if $data01 != 32767 then
return -1
endi
sql insert into st_smallint_23 using mt_smallint tags (-32767) values (now, -32767)
-sql select tagname from st_smallint_23
-if $data00 != -32767 then
+sql show tags from st_smallint_23
+if $data05 != -32767 then
return -1
endi
sql select * from st_smallint_23
@@ -283,8 +248,8 @@ if $data01 != -32767 then
return -1
endi
sql insert into st_smallint_24 using mt_smallint tags (10) values (now, 10)
-sql select tagname from st_smallint_24
-if $data00 != 10 then
+sql show tags from st_smallint_24
+if $data05 != 10 then
return -1
endi
sql select * from st_smallint_24
@@ -292,8 +257,8 @@ if $data01 != 10 then
return -1
endi
sql insert into st_smallint_25 using mt_smallint tags ("-0") values (now, "-0")
-sql select tagname from st_smallint_25
-if $data00 != 0 then
+sql show tags from st_smallint_25
+if $data05 != 0 then
return -1
endi
sql select * from st_smallint_25
@@ -301,8 +266,8 @@ if $data01 != 0 then
return -1
endi
sql insert into st_smallint_26 using mt_smallint tags ('123') values (now, '123')
-sql select tagname from st_smallint_26
-if $data00 != 123 then
+sql show tags from st_smallint_26
+if $data05 != 123 then
return -1
endi
sql select * from st_smallint_26
@@ -310,8 +275,8 @@ if $data01 != 123 then
return -1
endi
sql insert into st_smallint_27 using mt_smallint tags (+056) values (now, +00056)
-sql select tagname from st_smallint_27
-if $data00 != 56 then
+sql show tags from st_smallint_27
+if $data05 != 56 then
return -1
endi
sql select * from st_smallint_27
@@ -319,8 +284,8 @@ if $data01 != 56 then
return -1
endi
sql insert into st_smallint_28 using mt_smallint tags (-056) values (now, -0056)
-sql select tagname from st_smallint_28
-if $data00 != -56 then
+sql show tags from st_smallint_28
+if $data05 != -56 then
return -1
endi
sql select * from st_smallint_28
@@ -330,49 +295,49 @@ endi
## case 03: alter tag values
#sql alter table st_smallint_0 set tag tagname=32767
-#sql select tagname from st_smallint_0
-#if $data00 != 32767 then
+#sql show tags from st_smallint_0
+#if $data05 != 32767 then
# return -1
#endi
#sql alter table st_smallint_0 set tag tagname=-32767
-#sql select tagname from st_smallint_0
-#if $data00 != -32767 then
+#sql show tags from st_smallint_0
+#if $data05 != -32767 then
# return -1
#endi
#sql alter table st_smallint_0 set tag tagname=+100
-#sql select tagname from st_smallint_0
-#if $data00 != 100 then
+#sql show tags from st_smallint_0
+#if $data05 != 100 then
# return -1
#endi
#sql alter table st_smallint_0 set tag tagname=-33
-#sql select tagname from st_smallint_0
-#if $data00 != -33 then
+#sql show tags from st_smallint_0
+#if $data05 != -33 then
# return -1
#endi
#sql alter table st_smallint_0 set tag tagname='+98'
-#sql select tagname from st_smallint_0
-#if $data00 != 98 then
+#sql show tags from st_smallint_0
+#if $data05 != 98 then
# return -1
#endi
#sql alter table st_smallint_0 set tag tagname='-076'
-#sql select tagname from st_smallint_0
-#if $data00 != -76 then
+#sql show tags from st_smallint_0
+#if $data05 != -76 then
# return -1
#endi
#sql alter table st_smallint_0 set tag tagname=+0012
-#sql select tagname from st_smallint_0
-#if $data00 != 12 then
+#sql show tags from st_smallint_0
+#if $data05 != 12 then
# return -1
#endi
#sql alter table st_smallint_0 set tag tagname=-00063
-#sql select tagname from st_smallint_0
-#if $data00 != -63 then
+#sql show tags from st_smallint_0
+#if $data05 != -63 then
# return -1
#endi
## case 04: illegal input
sql_error create table st_smallint_e0 using mt_smallint tags (32768)
-sql_error create table st_smallint_e0 using mt_smallint tags (-32768)
+sql create table st_smallint_e0_0 using mt_smallint tags (-32768)
sql_error create table st_smallint_e0 using mt_smallint tags (3276899)
sql_error create table st_smallint_e0 using mt_smallint tags (-3276833)
#sql_error create table st_smallint_e0 using mt_smallint tags (12.80) truncate integer part
@@ -382,7 +347,7 @@ sql_error create table st_smallint_e0 using mt_smallint tags ("123abc")
sql_error create table st_smallint_e0 using mt_smallint tags (abc)
sql_error create table st_smallint_e0 using mt_smallint tags ("abc")
sql_error create table st_smallint_e0 using mt_smallint tags (" ")
-sql_error create table st_smallint_e0 using mt_smallint tags ('')
+sql create table st_smallint_e0_1 using mt_smallint tags ('')
sql create table st_smallint_e0 using mt_smallint tags (123)
sql create table st_smallint_e1 using mt_smallint tags (123)
@@ -399,7 +364,7 @@ sql create table st_smallint_e11 using mt_smallint tags (123)
sql create table st_smallint_e12 using mt_smallint tags (123)
sql_error insert into st_smallint_e0 values (now, 32768)
-sql_error insert into st_smallint_e1 values (now, -32768)
+sql insert into st_smallint_e1 values (now, -32768)
sql_error insert into st_smallint_e2 values (now, 42768)
sql_error insert into st_smallint_e3 values (now, -32769)
#sql_error insert into st_smallint_e4 values (now, 12.80)
@@ -409,10 +374,10 @@ sql_error insert into st_smallint_e7 values (now, "123abc")
sql_error insert into st_smallint_e9 values (now, abc)
sql_error insert into st_smallint_e10 values (now, "abc")
sql_error insert into st_smallint_e11 values (now, " ")
-sql_error insert into st_smallint_e12 values (now, '')
+sql insert into st_smallint_e12 values (now, '')
sql_error insert into st_smallint_e13 using mt_smallint tags (033) values (now, 32768)
-sql_error insert into st_smallint_e14 using mt_smallint tags (033) values (now, -32768)
+sql insert into st_smallint_e14_1 using mt_smallint tags (033) values (now, -32768)
sql_error insert into st_smallint_e15 using mt_smallint tags (033) values (now, 32968)
sql_error insert into st_smallint_e16 using mt_smallint tags (033) values (now, -33768)
#sql_error insert into st_smallint_e17 using mt_smallint tags (033) values (now, 12.80)
@@ -422,10 +387,10 @@ sql_error insert into st_smallint_e20 using mt_smallint tags (033) values (now,
sql_error insert into st_smallint_e22 using mt_smallint tags (033) values (now, abc)
sql_error insert into st_smallint_e23 using mt_smallint tags (033) values (now, "abc")
sql_error insert into st_smallint_e24 using mt_smallint tags (033) values (now, " ")
-sql_error insert into st_smallint_e25 using mt_smallint tags (033) values (now, '')
+sql insert into st_smallint_e25_1 using mt_smallint tags (033) values (now, '')
sql_error insert into st_smallint_e13 using mt_smallint tags (32768) values (now, -033)
-sql_error insert into st_smallint_e14 using mt_smallint tags (-32768) values (now, -033)
+sql insert into st_smallint_e14 using mt_smallint tags (-32768) values (now, -033)
sql_error insert into st_smallint_e15 using mt_smallint tags (72768) values (now, -033)
sql_error insert into st_smallint_e16 using mt_smallint tags (-92768) values (now, -033)
#sql_error insert into st_smallint_e17 using mt_smallint tags (12.80) values (now, -033)
@@ -435,7 +400,7 @@ sql_error insert into st_smallint_e20 using mt_smallint tags ("123abc") values (
sql_error insert into st_smallint_e22 using mt_smallint tags (abc) values (now, -033)
sql_error insert into st_smallint_e23 using mt_smallint tags ("abc") values (now, -033)
sql_error insert into st_smallint_e24 using mt_smallint tags (" ") values (now, -033)
-sql_error insert into st_smallint_e25 using mt_smallint tags ('') values (now, -033)
+sql insert into st_smallint_e25 using mt_smallint tags ('') values (now, -033)
sql insert into st_smallint_e13 using mt_smallint tags (033) values (now, 00062)
sql insert into st_smallint_e14 using mt_smallint tags (033) values (now, 00062)
@@ -452,7 +417,7 @@ sql insert into st_smallint_e24 using mt_smallint tags (033) values (now, 00062)
sql insert into st_smallint_e25 using mt_smallint tags (033) values (now, 00062)
sql_error alter table st_smallint_e13 set tag tagname=32768
-sql_error alter table st_smallint_e14 set tag tagname=-32768
+sql alter table st_smallint_e14 set tag tagname=-32768
sql_error alter table st_smallint_e15 set tag tagname=52768
sql_error alter table st_smallint_e16 set tag tagname=-32778
sql_error alter table st_smallint_e19 set tag tagname=123abc
@@ -460,4 +425,4 @@ sql_error alter table st_smallint_e20 set tag tagname="123abc"
sql_error alter table st_smallint_e22 set tag tagname=abc
sql_error alter table st_smallint_e23 set tag tagname="abc"
sql_error alter table st_smallint_e24 set tag tagname=" "
-sql_error alter table st_smallint_e25 set tag tagname=''
+sql alter table st_smallint_e25 set tag tagname=''
diff --git a/tests/script/tsim/parser/columnValue_tinyint.sim b/tests/script/tsim/parser/columnValue_tinyint.sim
index 67c0f998ca..bc1fcd3445 100644
--- a/tests/script/tsim/parser/columnValue_tinyint.sim
+++ b/tests/script/tsim/parser/columnValue_tinyint.sim
@@ -1,4 +1,12 @@
-sql create database if not exists db
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========== columnValues.sim
+
+sql drop database if exists db
+sql create database db
sql use db
#### test the value of all data types in four cases: static create table, insert column value, synamic create table, alter tag value
@@ -9,79 +17,65 @@ sql create table mt_tinyint (ts timestamp, c tinyint) tags (tagname tinyint)
## case 00: static create table for test tag values
sql create table st_tinyint_0 using mt_tinyint tags (NULL)
-sql select tagname from st_tinyint_0
-if $data00 != NULL then
- print expect NULL, actually: $data00
+sql show tags from st_tinyint_0
+if $data05 != NULL then
+ print expect NULL, actually: $data05
return -1
endi
sql create table st_tinyint_1 using mt_tinyint tags (NULL)
-sql select tagname from st_tinyint_1
-if $data00 != NULL then
- return -1
-endi
-sql create table st_tinyint_2 using mt_tinyint tags ('NULL')
-sql select tagname from st_tinyint_2
-if $data00 != NULL then
- return -1
-endi
-sql create table st_tinyint_3 using mt_tinyint tags ('NULL')
-sql select tagname from st_tinyint_3
-if $data00 != NULL then
- return -1
-endi
-sql create table st_tinyint_4 using mt_tinyint tags ("NULL")
-sql select tagname from st_tinyint_4
-if $data00 != NULL then
- return -1
-endi
-sql create table st_tinyint_5 using mt_tinyint tags ("NULL")
-sql select tagname from st_tinyint_5
-if $data00 != NULL then
+sql show tags from st_tinyint_1
+if $data05 != NULL then
return -1
endi
+
+sql_error create table st_tinyint_2 using mt_tinyint tags ('NULL')
+sql_error create table st_tinyint_3 using mt_tinyint tags ('NULL')
+sql_error create table st_tinyint_4 using mt_tinyint tags ("NULL")
+sql_error create table st_tinyint_5 using mt_tinyint tags ("NULL")
+
sql create table st_tinyint_6 using mt_tinyint tags (-127)
-sql select tagname from st_tinyint_6
-if $data00 != -127 then
+sql show tags from st_tinyint_6
+if $data05 != -127 then
return -1
endi
sql create table st_tinyint_7 using mt_tinyint tags (127)
-sql select tagname from st_tinyint_7
-if $data00 != 127 then
+sql show tags from st_tinyint_7
+if $data05 != 127 then
return -1
endi
sql create table st_tinyint_8 using mt_tinyint tags (37)
-sql select tagname from st_tinyint_8
-if $data00 != 37 then
+sql show tags from st_tinyint_8
+if $data05 != 37 then
return -1
endi
sql create table st_tinyint_9 using mt_tinyint tags (-100)
-sql select tagname from st_tinyint_9
-if $data00 != -100 then
+sql show tags from st_tinyint_9
+if $data05 != -100 then
return -1
endi
sql create table st_tinyint_10 using mt_tinyint tags (+113)
-sql select tagname from st_tinyint_10
-if $data00 != 113 then
+sql show tags from st_tinyint_10
+if $data05 != 113 then
return -1
endi
sql create table st_tinyint_11 using mt_tinyint tags ('-100')
-sql select tagname from st_tinyint_11
-if $data00 != -100 then
+sql show tags from st_tinyint_11
+if $data05 != -100 then
return -1
endi
sql create table st_tinyint_12 using mt_tinyint tags ("+78")
-sql select tagname from st_tinyint_12
-if $data00 != 78 then
+sql show tags from st_tinyint_12
+if $data05 != 78 then
return -1
endi
sql create table st_tinyint_13 using mt_tinyint tags (+0078)
-sql select tagname from st_tinyint_13
-if $data00 != 78 then
+sql show tags from st_tinyint_13
+if $data05 != 78 then
return -1
endi
sql create table st_tinyint_14 using mt_tinyint tags (-00078)
-sql select tagname from st_tinyint_14
-if $data00 != -78 then
+sql show tags from st_tinyint_14
+if $data05 != -78 then
return -1
endi
@@ -102,38 +96,6 @@ endi
if $data01 != NULL then
return -1
endi
-sql insert into st_tinyint_2 values (now, 'NULL')
-sql select * from st_tinyint_2
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_tinyint_3 values (now, 'NULL')
-sql select * from st_tinyint_3
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_tinyint_4 values (now, "NULL")
-sql select * from st_tinyint_4
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
-sql insert into st_tinyint_5 values (now, "NULL")
-sql select * from st_tinyint_5
-if $rows != 1 then
- return -1
-endi
-if $data01 != NULL then
- return -1
-endi
sql insert into st_tinyint_6 values (now, 127)
sql select * from st_tinyint_6
if $rows != 1 then
@@ -211,8 +173,8 @@ endi
## case 02: dynamic create table for test tag values
sql insert into st_tinyint_16 using mt_tinyint tags (NULL) values (now, NULL)
-sql select tagname from st_tinyint_16
-if $data00 != NULL then
+sql show tags from st_tinyint_16
+if $data05 != NULL then
return -1
endi
sql select * from st_tinyint_16
@@ -221,8 +183,8 @@ if $data01 != NULL then
endi
sql insert into st_tinyint_17 using mt_tinyint tags (NULL) values (now, NULL)
-sql select tagname from st_tinyint_17
-if $data00 != NULL then
+sql show tags from st_tinyint_17
+if $data05 != NULL then
return -1
endi
sql select * from st_tinyint_17
@@ -230,8 +192,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_tinyint_18 using mt_tinyint tags ('NULL') values (now, 'NULL')
-sql select tagname from st_tinyint_18
-if $data00 != NULL then
+sql show tags from st_tinyint_18
+if $data05 != NULL then
return -1
endi
sql select * from st_tinyint_18
@@ -239,8 +201,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_tinyint_19 using mt_tinyint tags ('NULL') values (now, 'NULL')
-sql select tagname from st_tinyint_19
-if $data00 != NULL then
+sql show tags from st_tinyint_19
+if $data05 != NULL then
return -1
endi
sql select * from st_tinyint_19
@@ -248,8 +210,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_tinyint_20 using mt_tinyint tags ("NULL") values (now, "NULL")
-sql select tagname from st_tinyint_20
-if $data00 != NULL then
+sql show tags from st_tinyint_20
+if $data05 != NULL then
return -1
endi
sql select * from st_tinyint_20
@@ -257,8 +219,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_tinyint_21 using mt_tinyint tags ("NULL") values (now, "NULL")
-sql select tagname from st_tinyint_21
-if $data00 != NULL then
+sql show tags from st_tinyint_21
+if $data05 != NULL then
return -1
endi
sql select * from st_tinyint_21
@@ -266,8 +228,8 @@ if $data01 != NULL then
return -1
endi
sql insert into st_tinyint_22 using mt_tinyint tags (127) values (now, 127)
-sql select tagname from st_tinyint_22
-if $data00 != 127 then
+sql show tags from st_tinyint_22
+if $data05 != 127 then
return -1
endi
sql select * from st_tinyint_22
@@ -275,8 +237,8 @@ if $data01 != 127 then
return -1
endi
sql insert into st_tinyint_23 using mt_tinyint tags (-127) values (now, -127)
-sql select tagname from st_tinyint_23
-if $data00 != -127 then
+sql show tags from st_tinyint_23
+if $data05 != -127 then
return -1
endi
sql select * from st_tinyint_23
@@ -284,8 +246,8 @@ if $data01 != -127 then
return -1
endi
sql insert into st_tinyint_24 using mt_tinyint tags (10) values (now, 10)
-sql select tagname from st_tinyint_24
-if $data00 != 10 then
+sql show tags from st_tinyint_24
+if $data05 != 10 then
return -1
endi
sql select * from st_tinyint_24
@@ -293,8 +255,8 @@ if $data01 != 10 then
return -1
endi
sql insert into st_tinyint_25 using mt_tinyint tags ("-0") values (now, "-0")
-sql select tagname from st_tinyint_25
-if $data00 != 0 then
+sql show tags from st_tinyint_25
+if $data05 != 0 then
return -1
endi
sql select * from st_tinyint_25
@@ -302,8 +264,8 @@ if $data01 != 0 then
return -1
endi
sql insert into st_tinyint_26 using mt_tinyint tags ('123') values (now, '123')
-sql select tagname from st_tinyint_26
-if $data00 != 123 then
+sql show tags from st_tinyint_26
+if $data05 != 123 then
return -1
endi
sql select * from st_tinyint_26
@@ -311,8 +273,8 @@ if $data01 != 123 then
return -1
endi
sql insert into st_tinyint_27 using mt_tinyint tags (+056) values (now, +00056)
-sql select tagname from st_tinyint_27
-if $data00 != 56 then
+sql show tags from st_tinyint_27
+if $data05 != 56 then
return -1
endi
sql select * from st_tinyint_27
@@ -320,8 +282,8 @@ if $data01 != 56 then
return -1
endi
sql insert into st_tinyint_28 using mt_tinyint tags (-056) values (now, -0056)
-sql select tagname from st_tinyint_28
-if $data00 != -56 then
+sql show tags from st_tinyint_28
+if $data05 != -56 then
return -1
endi
sql select * from st_tinyint_28
@@ -331,49 +293,49 @@ endi
## case 03: alter tag values
#sql alter table st_tinyint_0 set tag tagname=127
-#sql select tagname from st_tinyint_0
-#if $data00 != 127 then
+#sql show tags from st_tinyint_0
+#if $data05 != 127 then
# return -1
#endi
#sql alter table st_tinyint_0 set tag tagname=-127
-#sql select tagname from st_tinyint_0
-#if $data00 != -127 then
+#sql show tags from st_tinyint_0
+#if $data05 != -127 then
# return -1
#endi
#sql alter table st_tinyint_0 set tag tagname=+100
-#sql select tagname from st_tinyint_0
-#if $data00 != 100 then
+#sql show tags from st_tinyint_0
+#if $data05 != 100 then
# return -1
#endi
#sql alter table st_tinyint_0 set tag tagname=-33
-#sql select tagname from st_tinyint_0
-#if $data00 != -33 then
+#sql show tags from st_tinyint_0
+#if $data05 != -33 then
# return -1
#endi
#sql alter table st_tinyint_0 set tag tagname='+98'
-#sql select tagname from st_tinyint_0
-#if $data00 != 98 then
+#sql show tags from st_tinyint_0
+#if $data05 != 98 then
# return -1
#endi
#sql alter table st_tinyint_0 set tag tagname='-076'
-#sql select tagname from st_tinyint_0
-#if $data00 != -76 then
+#sql show tags from st_tinyint_0
+#if $data05 != -76 then
# return -1
#endi
#sql alter table st_tinyint_0 set tag tagname=+0012
-#sql select tagname from st_tinyint_0
-#if $data00 != 12 then
+#sql show tags from st_tinyint_0
+#if $data05 != 12 then
# return -1
#endi
#sql alter table st_tinyint_0 set tag tagname=-00063
-#sql select tagname from st_tinyint_0
-#if $data00 != -63 then
+#sql show tags from st_tinyint_0
+#if $data05 != -63 then
# return -1
#endi
## case 04: illegal input
sql_error create table st_tinyint_e0 using mt_tinyint tags (128)
-sql_error create table st_tinyint_e0 using mt_tinyint tags (-128)
+sql create table st_tinyint_e0_1 using mt_tinyint tags (-128)
sql_error create table st_tinyint_e0 using mt_tinyint tags (1280)
sql_error create table st_tinyint_e0 using mt_tinyint tags (-1280)
#sql_error create table st_tinyint_e0 using mt_tinyint tags (12.80) truncate integer part
@@ -383,7 +345,7 @@ sql_error create table st_tinyint_e0 using mt_tinyint tags ("123abc")
sql_error create table st_tinyint_e0 using mt_tinyint tags (abc)
sql_error create table st_tinyint_e0 using mt_tinyint tags ("abc")
sql_error create table st_tinyint_e0 using mt_tinyint tags (" ")
-sql_error create table st_tinyint_e0 using mt_tinyint tags ('')
+sql create table st_tinyint_e0_2 using mt_tinyint tags ('')
sql create table st_tinyint_e0 using mt_tinyint tags (123)
sql create table st_tinyint_e1 using mt_tinyint tags (123)
@@ -400,7 +362,7 @@ sql create table st_tinyint_e11 using mt_tinyint tags (123)
sql create table st_tinyint_e12 using mt_tinyint tags (123)
sql_error insert into st_tinyint_e0 values (now, 128)
-sql_error insert into st_tinyint_e1 values (now, -128)
+sql insert into st_tinyint_e1 values (now, -128)
sql_error insert into st_tinyint_e2 values (now, 1280)
sql_error insert into st_tinyint_e3 values (now, -1280)
#sql_error insert into st_tinyint_e4 values (now, 12.80)
@@ -410,10 +372,10 @@ sql_error insert into st_tinyint_e7 values (now, "123abc")
sql_error insert into st_tinyint_e9 values (now, abc)
sql_error insert into st_tinyint_e10 values (now, "abc")
sql_error insert into st_tinyint_e11 values (now, " ")
-sql_error insert into st_tinyint_e12 values (now, '')
+sql insert into st_tinyint_e12 values (now, '')
sql_error insert into st_tinyint_e13 using mt_tinyint tags (033) values (now, 128)
-sql_error insert into st_tinyint_e14 using mt_tinyint tags (033) values (now, -128)
+sql insert into st_tinyint_e14_1 using mt_tinyint tags (033) values (now, -128)
sql_error insert into st_tinyint_e15 using mt_tinyint tags (033) values (now, 1280)
sql_error insert into st_tinyint_e16 using mt_tinyint tags (033) values (now, -1280)
#sql_error insert into st_tinyint_e17 using mt_tinyint tags (033) values (now, 12.80)
@@ -423,10 +385,10 @@ sql_error insert into st_tinyint_e20 using mt_tinyint tags (033) values (now, "1
sql_error insert into st_tinyint_e22 using mt_tinyint tags (033) values (now, abc)
sql_error insert into st_tinyint_e23 using mt_tinyint tags (033) values (now, "abc")
sql_error insert into st_tinyint_e24 using mt_tinyint tags (033) values (now, " ")
-sql_error insert into st_tinyint_e25 using mt_tinyint tags (033) values (now, '')
+sql insert into st_tinyint_e25_2 using mt_tinyint tags (033) values (now, '')
sql_error insert into st_tinyint_e13 using mt_tinyint tags (128) values (now, -033)
-sql_error insert into st_tinyint_e14 using mt_tinyint tags (-128) values (now, -033)
+sql insert into st_tinyint_e14 using mt_tinyint tags (-128) values (now, -033)
sql_error insert into st_tinyint_e15 using mt_tinyint tags (1280) values (now, -033)
sql_error insert into st_tinyint_e16 using mt_tinyint tags (-1280) values (now, -033)
#sql_error insert into st_tinyint_e17 using mt_tinyint tags (12.80) values (now, -033)
@@ -436,7 +398,7 @@ sql_error insert into st_tinyint_e20 using mt_tinyint tags ("123abc") values (no
sql_error insert into st_tinyint_e22 using mt_tinyint tags (abc) values (now, -033)
sql_error insert into st_tinyint_e23 using mt_tinyint tags ("abc") values (now, -033)
sql_error insert into st_tinyint_e24 using mt_tinyint tags (" ") values (now, -033)
-sql_error insert into st_tinyint_e25 using mt_tinyint tags ('') values (now, -033)
+sql insert into st_tinyint_e25 using mt_tinyint tags ('') values (now, -033)
sql insert into st_tinyint_e13 using mt_tinyint tags (033) values (now, 00062)
sql insert into st_tinyint_e14 using mt_tinyint tags (033) values (now, 00062)
@@ -453,7 +415,7 @@ sql insert into st_tinyint_e24 using mt_tinyint tags (033) values (now, 00062)
sql insert into st_tinyint_e25 using mt_tinyint tags (033) values (now, 00062)
sql_error alter table st_tinyint_e13 set tag tagname=128
-sql_error alter table st_tinyint_e14 set tag tagname=-128
+sql alter table st_tinyint_e14 set tag tagname=-128
sql_error alter table st_tinyint_e15 set tag tagname=1280
sql_error alter table st_tinyint_e16 set tag tagname=-1280
sql_error alter table st_tinyint_e19 set tag tagname=123abc
@@ -461,4 +423,4 @@ sql_error alter table st_tinyint_e20 set tag tagname="123abc"
sql_error alter table st_tinyint_e22 set tag tagname=abc
sql_error alter table st_tinyint_e23 set tag tagname="abc"
sql_error alter table st_tinyint_e24 set tag tagname=" "
-sql_error alter table st_tinyint_e25 set tag tagname=''
+sql alter table st_tinyint_e25 set tag tagname=''
diff --git a/tests/script/tsim/parser/columnValue_unsign.sim b/tests/script/tsim/parser/columnValue_unsign.sim
index 4b8baf10cd..a72b1082f6 100644
--- a/tests/script/tsim/parser/columnValue_unsign.sim
+++ b/tests/script/tsim/parser/columnValue_unsign.sim
@@ -1,4 +1,12 @@
-sql create database if not exists db
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========== columnValues.sim
+
+sql drop database if exists db
+sql create database db
sql use db
sql drop table if exists mt_unsigned;
@@ -10,28 +18,21 @@ sql alter table mt_unsigned_1 set tag t1=138;
sql alter table mt_unsigned_1 set tag t2=32769;
sql alter table mt_unsigned_1 set tag t3=294967295;
sql alter table mt_unsigned_1 set tag t4=446744073709551615;
+sql insert into mt_unsigned_1 values (now, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
sql select t1,t2,t3,t4 from mt_unsigned_1
if $rows != 1 then
return -1
endi
-
-print $data00, $data01, $data02, $data03
-
-if $data00 != 138 then
- print expect 138, actual: $data00
- return -1
-endi
-
+print $data01, $data02, $data03
if $data01 != 32769 then
-return -1
+ return -1
endi
-
if $data02 != 294967295 then
-return -1
+ return -1
endi
-
if $data03 != 446744073709551615 then
-return -1
+ return -1
endi
sql_error sql alter table mt_unsigned_1 set tag t1 = 999;
@@ -44,10 +45,10 @@ sql_error create table mt_unsigned_3 using mt_unsigned tags(0, -1, 0, 0, 0, 0, 0
sql_error create table mt_unsigned_4 using mt_unsigned tags(0, 0, -1, 0, 0, 0, 0, 0);
sql_error create table mt_unsigned_5 using mt_unsigned tags(0, 0, 0, -1, 0, 0, 0, 0);
-sql_error create table mt_unsigned_2 using mt_unsigned tags(255, 0, 0, 0, 0, 0, 0, 0);
-sql_error create table mt_unsigned_3 using mt_unsigned tags(0, 65535, 0, 0, 0, 0, 0, 0);
-sql_error create table mt_unsigned_4 using mt_unsigned tags(0, 0, 4294967295, 0, 0, 0, 0, 0);
-sql_error create table mt_unsigned_5 using mt_unsigned tags(0, 0, 0, 18446744073709551615, 0, 0, 0, 0);
+sql create table mt_unsigned_21 using mt_unsigned tags(255, 0, 0, 0, 0, 0, 0, 0);
+sql create table mt_unsigned_31 using mt_unsigned tags(0, 65535, 0, 0, 0, 0, 0, 0);
+sql create table mt_unsigned_41 using mt_unsigned tags(0, 0, 4294967295, 0, 0, 0, 0, 0);
+sql create table mt_unsigned_51 using mt_unsigned tags(0, 0, 0, 18446744073709551615, 0, 0, 0, 0);
sql_error create table mt_unsigned_2 using mt_unsigned tags(999, 0, 0, 0, 0, 0, 0, 0);
sql_error create table mt_unsigned_3 using mt_unsigned tags(0, 95535, 0, 0, 0, 0, 0, 0);
@@ -63,11 +64,6 @@ if $rows != 1 then
return -1;
endi
-if $data00 != NULL then
- print expect NULL, actual: $data00
- return -1
-endi
-
if $data01 != NULL then
return -1
endi
@@ -87,82 +83,44 @@ sql_error insert into mt_unsigned_1 values(now, -1, NULL, NULL, NULL, NULL, NULL
sql_error insert into mt_unsigned_1 values(now, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
sql_error insert into mt_unsigned_1 values(now, NULL, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL);
sql_error insert into mt_unsigned_1 values(now, NULL, NULL, NULL, -1, NULL, NULL, NULL, NULL, NULL);
-sql_error insert into mt_unsigned_1 values(now, 255, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-sql_error insert into mt_unsigned_1 values(now, NULL, 65535, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-sql_error insert into mt_unsigned_1 values(now, NULL, NULL, 4294967295, NULL, NULL, NULL, NULL, NULL, NULL);
-sql_error insert into mt_unsigned_1 values(now, NULL, NULL, NULL, 18446744073709551615, NULL, NULL, NULL, NULL, NULL);
+sql insert into mt_unsigned_1 values(now, 255, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+sql insert into mt_unsigned_1 values(now, NULL, 65535, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+sql insert into mt_unsigned_1 values(now, NULL, NULL, 4294967295, NULL, NULL, NULL, NULL, NULL, NULL);
+sql insert into mt_unsigned_1 values(now, NULL, NULL, NULL, 18446744073709551615, NULL, NULL, NULL, NULL, NULL);
+
sql select count(a),count(b),count(c),count(d), count(e) from mt_unsigned_1
if $rows != 1 then
return -1
endi
-if $data00 != 1 then
- return -1
-endi
-
-if $data01 != 1 then
- return -1
-endi
-
-sql select a+b+c from mt_unsigned_1 where a is null;
-if $rows != 1 then
- return -1
-endi
-
-if $data00 != NULL then
- print expect NULL, actual:$data00
- return -1
-endi
-
-sql select count(*), a from mt_unsigned_1 group by a;
-if $rows != 1 then
- return -1
-endi
-
-if $data00 != 1 then
- return -1
-endi
-
-if $data01 != 1 then
- return -1
-endi
-
-sql select count(*), b from mt_unsigned_1 group by b;
-if $rows != 1 then
- return -1
-endi
-
-if $data00 != 1 then
- return -1
-endi
-
-if $data01 != 2 then
- return -1
-endi
-
-sql select count(*), c from mt_unsigned_1 group by c;
-if $rows != 1 then
- return -1
-endi
-
-if $data00 != 1 then
- return -1
-endi
-
if $data01 != 3 then
return -1
endi
+sql select a+b+c from mt_unsigned_1 where a is null;
+if $rows != 4 then
+ return -1
+endi
+
+sql select count(*), a from mt_unsigned_1 group by a;
+if $rows != 4 then
+ return -1
+endi
+
+sql select count(*), b from mt_unsigned_1 group by b;
+if $rows != 4 then
+ return -1
+endi
+
+
+sql select count(*), c from mt_unsigned_1 group by c;
+if $rows != 4 then
+ return -1
+endi
+
+
sql select count(*), d from mt_unsigned_1 group by d;
-if $rows != 1 then
- return -1
-endi
-
-if $data00 != 1 then
- return -1
-endi
-
-if $data01 != 4 then
+if $rows != 4 then
return -1
endi
diff --git a/tests/script/tsim/parser/first_last_query.sim b/tests/script/tsim/parser/first_last_query.sim
index adad554fb2..d7d6f48259 100644
--- a/tests/script/tsim/parser/first_last_query.sim
+++ b/tests/script/tsim/parser/first_last_query.sim
@@ -283,12 +283,14 @@ print ================== server restart completed
sql connect
sql use first_db0;
-sql select last(*), tbname from m1 group by tbname;
+sql select last(*), tbname from m1 group by tbname order by tbname;
+
if $rows != 2 then
return -1
endi
if $data00 != @20-03-01 01:01:01.000@ then
+ print data00 $data00 != @20-03-01 01:01:01.000@
return -1
endi
diff --git a/tests/script/tsim/parser/function.sim b/tests/script/tsim/parser/function.sim
index cbfb59bcab..110901a6e1 100644
--- a/tests/script/tsim/parser/function.sim
+++ b/tests/script/tsim/parser/function.sim
@@ -500,11 +500,12 @@ if $rows != 2 then
return -1
endi
-sql select stddev(k), stddev(b), stddev(c),tbname, a from m1 group by tbname,a
+sql select stddev(k), stddev(b), stddev(c),tbname, a from m1 group by tbname,a order by a asc
if $rows != 2 then
return -1
endi
if $data00 != 1.414213562 then
+ print expect 1.414213562, actual: $data00
return -1
endi
if $data01 != 14.142135624 then
@@ -732,6 +733,7 @@ if $rows != 1 then
return -1
endi
if $data00 != 0.005633334 then
+ print expect 0.005633334, actual: $data00
return -1
endi
diff --git a/tests/script/tsim/parser/groupby.sim b/tests/script/tsim/parser/groupby.sim
index bf2c7cc7bf..c4c19ca211 100644
--- a/tests/script/tsim/parser/groupby.sim
+++ b/tests/script/tsim/parser/groupby.sim
@@ -681,12 +681,13 @@ if $data14 != 1 then
return -1
endi
-sql select _wstart, irate(c), tbname, t1, t2 from st where t1=1 and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' partition by tbname, t1, t2 interval(1m) sliding(15s) order by tbname desc limit 1;
+sql select _wstart, irate(c), tbname, t1, t2 from st where t1=1 and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' partition by tbname, t1, t2 interval(1m) sliding(15s) order by tbname desc,_wstart asc limit 1;
if $rows != 1 then
return -1
endi
if $data01 != 1.000000000 then
+ print expect 1.000000000, actual: $data01
return -1
endi
if $data02 != t2 then
diff --git a/tests/script/tsim/parser/join.sim b/tests/script/tsim/parser/join.sim
index 269d4ca254..0f41ebd178 100644
--- a/tests/script/tsim/parser/join.sim
+++ b/tests/script/tsim/parser/join.sim
@@ -243,9 +243,6 @@ if $rows != $val then
return -1
endi
-#TODO
-return
-
#===========================aggregation===================================
#select + where condition
sql select count(join_tb1.*), count(join_tb0.*) from $tb1 , $tb2 where $ts1 = $ts2 and join_tb1.ts >= 100000 and join_tb0.c7 = false;
diff --git a/tests/script/tsim/parser/limit_stb.sim b/tests/script/tsim/parser/limit_stb.sim
index 0d0e4a8ea3..a0aff953cf 100644
--- a/tests/script/tsim/parser/limit_stb.sim
+++ b/tests/script/tsim/parser/limit_stb.sim
@@ -360,6 +360,7 @@ endi
#sql select max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9) from lm_stb0 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 10:30:00.000' and c1 > 1 and c2 < 9 and c3 > 2 and c4 < 8 and c5 > 3 and c6 < 7 and c7 > 0 and c8 like '%5' and t1 > 3 and t1 < 6 limit 1 offset 0;
sql select max(c1), min(c2), avg(c3), sum(c5), spread(c6), first(c7), last(c8), first(c9) from lm_stb0 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 10:30:00.000' and c1 > 1 and c2 < 9 and c3 > 2 and c4 < 8 and c5 > 3 and c6 < 7 and c7 = true and c8 like '%5' and t1 > 3 and t1 < 6 limit 1 offset 0;
if $rows != 1 then
+ print expect 1, actual: $rows
return -1
endi
if $data00 != 5 then
diff --git a/tests/script/tsim/parser/tags_dynamically_specifiy.sim b/tests/script/tsim/parser/tags_dynamically_specifiy.sim
index d1f73c4f60..e6cdeea970 100644
--- a/tests/script/tsim/parser/tags_dynamically_specifiy.sim
+++ b/tests/script/tsim/parser/tags_dynamically_specifiy.sim
@@ -41,12 +41,10 @@ sql_error insert into tb17 (ts, c1, c3) using stb (t1, t3) tags ('tag5', 11.11,
sql_error insert into tb18 (ts, c1, c3) using stb tags ('tag5', 16) values ( now + 5s, 'binary6', 6.6)
sql_error insert into tb19 (ts, c1, c2, c3) using stb tags (19, 'tag5', 91.11) values ( now + 5s, 'binary7', 7, 7.7)
-
-
sql create table stbx (ts timestamp, c1 binary(10), c2 int, c3 float) tags (t1 binary(10), t2 int, t3 float)
sql insert into tb100 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag100', 100, 100.123456) values ( now + 10s, 'binary100', 100, 100.9) tb101 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag101', 101, 101.9) values ( now + 10s, 'binary101', 101, 101.9) tb102 (ts, c1, c2, c3) using stbx (t1, t2, t3) tags ('tag102', 102, 102.9) values ( now + 10s, 'binary102', 102, 102.9)
-sql select * from stbx
+sql select * from stbx order by t1
if $rows != 3 then
return -1
endi
diff --git a/tests/script/tsim/parser/tags_filter.sim b/tests/script/tsim/parser/tags_filter.sim
index bf33febdae..10fb135de3 100644
--- a/tests/script/tsim/parser/tags_filter.sim
+++ b/tests/script/tsim/parser/tags_filter.sim
@@ -100,7 +100,7 @@ endi
sql drop database $db
sql show databases
-if $rows != 0 then
+if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/parser/union_sysinfo.sim b/tests/script/tsim/parser/union_sysinfo.sim
index ea45dc68e1..50d7c88e88 100644
--- a/tests/script/tsim/parser/union_sysinfo.sim
+++ b/tests/script/tsim/parser/union_sysinfo.sim
@@ -25,11 +25,5 @@ sql (select database()) union all (select database())
if $rows != 2 then
return -1
endi
-if $data00 != @union_db0@ then
- return -1
-endi
-if $data10 != @union_db0@ then
- return -1
-endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/where.sim b/tests/script/tsim/parser/where.sim
index 596bffa6f0..08e250c03e 100644
--- a/tests/script/tsim/parser/where.sim
+++ b/tests/script/tsim/parser/where.sim
@@ -77,12 +77,12 @@ if $rows != $val then
return -1
endi
-sql select tbname from $mt
+sql select distinct tbname from $mt
if $rows != $tbNum then
return -1
endi
-sql select tbname from $mt where t1 < 2
+sql select distinct tbname from $mt where t1 < 2
if $rows != 2 then
return -1
endi
@@ -249,14 +249,14 @@ sql_error insert into tb_where_NULL values(now, ?, '12')
sql insert into tb_where_NULL values ('2019-01-01 09:00:00.000', 1, 'val1')
sql insert into tb_where_NULL values ('2019-01-01 09:00:01.000', NULL, NULL)
sql insert into tb_where_NULL values ('2019-01-01 09:00:02.000', 2, 'val2')
-sql_error select * from tb_where_NULL where c1 = NULL
-sql_error select * from tb_where_NULL where c1 <> NULL
-sql_error select * from tb_where_NULL where c1 < NULL
-sql_error select * from tb_where_NULL where c1 = "NULL"
-sql_error select * from tb_where_NULL where c1 <> "NULL"
-sql_error select * from tb_where_NULL where c1 <> "nulL"
-sql_error select * from tb_where_NULL where c1 > "NULL"
-sql_error select * from tb_where_NULL where c1 >= "NULL"
+sql select * from tb_where_NULL where c1 = NULL
+sql select * from tb_where_NULL where c1 <> NULL
+sql select * from tb_where_NULL where c1 < NULL
+sql select * from tb_where_NULL where c1 = "NULL"
+sql select * from tb_where_NULL where c1 <> "NULL"
+sql select * from tb_where_NULL where c1 <> "nulL"
+sql select * from tb_where_NULL where c1 > "NULL"
+sql select * from tb_where_NULL where c1 >= "NULL"
sql select * from tb_where_NULL where c2 = "NULL"
if $rows != 0 then
return -1
@@ -300,15 +300,17 @@ endw
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
-sql_error select * from wh_mt0 where c3 = 'abc' and tbname in ('test_null_filter');
+sql select * from wh_mt0 where c3 = 'abc' and tbname in ('test_null_filter');
sql select * from wh_mt0 where c3 = '1' and tbname in ('test_null_filter');
if $row != 0 then
return -1
endi
-sql select * from wh_mt0 where c3 = '1';
-if $row == 0 then
+sql select * from wh_mt0 where c3 = 1;
+print $rows -> 1000
+print $data00 $data01 $data02
+if $row != 1000 then
return -1
endi
@@ -336,7 +338,7 @@ sql insert into where_ts values('2021-06-19 16:22:00', 1);
sql insert into where_ts values('2021-06-19 16:23:00', 2);
sql insert into where_ts values('2021-06-19 16:24:00', 3);
sql insert into where_ts values('2021-06-19 16:25:00', 1);
-sql select * from (select * from where_ts) where ts<'2021-06-19 16:25:00' and ts>'2021-06-19 16:22:00'
+sql select * from (select * from where_ts) where ts<'2021-06-19 16:25:00' and ts>'2021-06-19 16:22:00' order by ts;
if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/stream/state0.sim b/tests/script/tsim/stream/state0.sim
index a0535cf93d..2573b44e19 100644
--- a/tests/script/tsim/stream/state0.sim
+++ b/tests/script/tsim/stream/state0.sim
@@ -498,4 +498,7 @@ if $data15 != 3 then
goto loop5
endi
+sql drop database test;
+sql drop database test1;
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim b/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim
index ec03aaf9db..241781eed1 100644
--- a/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim
+++ b/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim
@@ -47,7 +47,7 @@ endi
$replica = 3
$vgroups = 1
-$retentions = 5s:7d,15s:21d
+$retentions = 5s:7d,15s:21d,1m:365d
print ============= create database
sql create database db replica $replica vgroups $vgroups retentions $retentions
@@ -114,7 +114,7 @@ endi
vg_ready:
print ====> create stable/child table
-sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int) rollup(sum)
+sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int) rollup(sum) watermark 3s,3s max_delay 3s,3s
sql show stables
if $rows != 1 then
@@ -129,20 +129,28 @@ system sh/exec.sh -n dnode4 -s stop -x SIGINT
sleep 3000
-print ===> write 100 records
-$N = 100
-$count = 0
-while $count < $N
- $ms = 1659000000000 + $count
- sql insert into ct1 values( $ms , $count , 2.1, 3.1)
- $count = $count + 1
-endw
+print ===> write 0-50 records
+$ms = 0
+$cnt = 0
+while $cnt < 50
+ $ms = $cnt . m
+ sql insert into ct1 values (now + $ms , $cnt , 2.1, 3.1)
+ $cnt = $cnt + 1
+ endw
+print ===> flush database db
+sql flush database db;
+sleep 5000
+print ===> write 51-100 records
+while $cnt < 100
+ $ms = $cnt . m
+ sql insert into ct1 values (now + $ms , $cnt , 2.1, 3.1)
+ $cnt = $cnt + 1
+ endw
-#sql flush database db;
-
-
-sleep 3000
+print ===> flush database db
+sql flush database db;
+sleep 5000
print ===> stop dnode1 dnode2 dnode3
@@ -150,8 +158,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
-sleep 10000
-
########################################################
print ===> start dnode1 dnode2 dnode3 dnode4
system sh/exec.sh -n dnode1 -s start
@@ -164,7 +170,7 @@ sleep 3000
print =============== query data
sql connect
sql use db
-sql select * from ct1
+sql select * from ct1 where ts > now - 1d
print rows: $rows
print $data00 $data01 $data02
if $rows != 100 then
diff --git a/tests/script/tsim/tag/change.sim b/tests/script/tsim/tag/change.sim
index 236ad8ea67..13b2da4693 100644
--- a/tests/script/tsim/tag/change.sim
+++ b/tests/script/tsim/tag/change.sim
@@ -311,7 +311,7 @@ sql select * from $mt where tgcol2 = 1 -x step52
return -1
step52:
-sql select * from $mt where tgcol3 = 1
+sql select * from $mt where tgcol3 < 2
print $data01 $data02 $data03
if $rows != 1 then
return -1
diff --git a/tests/script/tsim/tag/delete.sim b/tests/script/tsim/tag/delete.sim
index bcfd822dbd..720f4341f9 100644
--- a/tests/script/tsim/tag/delete.sim
+++ b/tests/script/tsim/tag/delete.sim
@@ -97,10 +97,10 @@ if $data23 != TAG then
return -1
endi
+sql alter table $mt drop tag tgcol2
sql alter table $mt drop tag tgcol1 -x step40
return -1
step40:
-sql alter table $mt drop tag tgcol2
print =============== step5
$i = 5
@@ -123,11 +123,11 @@ if $data03 != 2 then
return -1
endi
+sql alter table $mt drop tag tgcol2
sql alter table $mt drop tag tgcol1 -x step50
return -1
step50:
-sql alter table $mt drop tag tgcol2
-
+
print =============== step6
$i = 6
$mt = $mtPrefix . $i
@@ -186,7 +186,7 @@ endi
if $data31 != TINYINT then
return -1
endi
-if $data41 != BINARY then
+if $data41 != VARCHAR then
return -1
endi
if $data22 != 2 then
@@ -405,8 +405,6 @@ sql alter table $mt drop tag tgcol3
sql alter table $mt drop tag tgcol4
sql alter table $mt drop tag tgcol6
-sleep 3000
-
print =============== step2
$i = 2
$mt = $mtPrefix . $i
diff --git a/tests/script/tsim/trans/create_db.sim b/tests/script/tsim/trans/create_db.sim
index 4a20f73e6c..057711aa88 100644
--- a/tests/script/tsim/trans/create_db.sim
+++ b/tests/script/tsim/trans/create_db.sim
@@ -32,7 +32,7 @@ if $data(2)[4] != ready then
endi
print =============== kill dnode2
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGKILL
print =============== create database
sql show transactions
@@ -88,7 +88,7 @@ endi
sql show transactions
if $rows != 0 then
- return -1
+ goto step2
endi
sql_error create database d1 vgroups 2;
diff --git a/tests/system-test/0-others/cachemodel.py b/tests/system-test/0-others/cachemodel.py
index 102a34612d..7fc2003983 100644
--- a/tests/system-test/0-others/cachemodel.py
+++ b/tests/system-test/0-others/cachemodel.py
@@ -11,7 +11,7 @@ from util.dnodes import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
@@ -37,7 +37,7 @@ class TDTestCase:
def illegal_params(self):
illegal_params = ["1","0","NULL","False","True" ,"keep","now" ,"*" , "," ,"_" , "abc" ,"keep"]
-
+
for value in illegal_params:
tdSql.error("create database testdb replica 1 cachemodel '%s' " %value)
@@ -80,9 +80,9 @@ class TDTestCase:
tdSql.execute(" insert into tb2 values(now , %d, %f)" %(k,k*10) )
def check_cachemodel_sets(self):
-
-
- # check cache_last value for database
+
+
+ # check cache_last value for database
tdSql.query(" show databases ")
databases_infos = tdSql.queryResult
@@ -96,10 +96,10 @@ class TDTestCase:
continue
cache_lasts[dbname]=self.getCacheModelNum(cache_last_value)
-
- # cache_last_set value
+
+ # cache_last_set value
for k , v in cache_lasts.items():
-
+
if k=="testdb_"+str(self.getCacheModelStr(v)):
tdLog.info(" database %s cache_last value check pass, value is %s "%(k,self.getCacheModelStr(v)) )
else:
@@ -116,7 +116,7 @@ class TDTestCase:
dataPath = buildPath + "/../sim/dnode1/data"
abs_vnodePath = os.path.abspath(dataPath)+"/vnode/"
tdLog.info("abs_vnodePath: %s" % abs_vnodePath)
-
+
tdSql.query(" show dnodes ")
dnode_id = tdSql.queryResult[0][0]
@@ -127,7 +127,7 @@ class TDTestCase:
vgroups_infos = tdSql.queryResult
for vgroup_info in vgroups_infos:
vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json"
- vnode_info_of_db = f"cat {vnode_json}"
+ vnode_info_of_db = f"cat {vnode_json}"
vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8")
infoDict = json.loads(vnode_info)
vnode_json_of_dbname = f"{dnode_id}."+ dbname
@@ -142,7 +142,7 @@ class TDTestCase:
tdLog.exit("cacheLast not found in vnode.json of vnode%d "%(vgroup_info[0]))
def restart_check_cachemodel_sets(self):
-
+
for i in range(3):
tdSql.query("show dnodes")
index = tdSql.getData(0, 0)
@@ -157,7 +157,7 @@ class TDTestCase:
self.prepare_datas()
self.check_cachemodel_sets()
self.restart_check_cachemodel_sets()
-
+
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/0-others/sysinfo.py b/tests/system-test/0-others/sysinfo.py
index f6f177d995..a4716dd544 100644
--- a/tests/system-test/0-others/sysinfo.py
+++ b/tests/system-test/0-others/sysinfo.py
@@ -33,14 +33,14 @@ class TDTestCase:
tdSql.query('select database()')
tdSql.checkData(0,0,self.dbname)
tdSql.execute(f'drop database {self.dbname}')
-
+
def check_version(self):
taos_list = ['server','client']
for i in taos_list:
tdSql.query(f'select {i}_version()')
version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char version"', shell=True,capture_output=True).stdout.decode('utf8')).split('"')[1]
tdSql.checkData(0,0,version_info)
-
+
def get_server_status(self):
sleep(self.delaytime)
tdSql.query('select server_status()')
@@ -51,7 +51,7 @@ class TDTestCase:
if platform.system().lower() == 'windows':
sleep(10)
tdSql.error('select server_status()')
-
+
def run(self):
self.get_database_info()
self.check_version()
@@ -61,4 +61,4 @@ class TDTestCase:
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/0-others/taosShell.py b/tests/system-test/0-others/taosShell.py
index f55813ac83..4f24a3bf4a 100644
--- a/tests/system-test/0-others/taosShell.py
+++ b/tests/system-test/0-others/taosShell.py
@@ -18,7 +18,7 @@ from util.dnodes import *
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
if len(key) == 0:
tdLog.exit("taos test key is null!")
-
+
if platform.system().lower() == 'windows':
taosCmd = buildPath + '\\build\\bin\\taos.exe '
taosCmd = taosCmd.replace('\\','\\\\')
@@ -214,7 +214,7 @@ class TDTestCase:
retCode, retVal = taos_command(buildPath, "p", keyDict['p'], "taos>", keyDict['c'], '', "A", '')
if retCode != "TAOS_OK":
tdLog.exit("taos -A fail")
-
+
sqlString = 'create database ' + newDbName + ';'
retCode = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'a', retVal)
if retCode != "TAOS_OK":
@@ -237,7 +237,7 @@ class TDTestCase:
tdLog.exit("taos -s fail")
print ("========== check new db ==========")
- tdSql.query("show databases")
+ tdSql.query("show databases")
for i in range(tdSql.queryRows):
if tdSql.getData(i, 0) == newDbName:
break
@@ -259,24 +259,24 @@ class TDTestCase:
if retCode != "TAOS_OK":
tdLog.exit("taos -s insert data fail")
- sqlString = "select * from " + newDbName + ".ctb0"
+ sqlString = "select * from " + newDbName + ".ctb0"
tdSql.query(sqlString)
tdSql.checkData(0, 0, '2021-04-01 08:00:00.000')
tdSql.checkData(0, 1, 10)
tdSql.checkData(1, 0, '2021-04-01 08:00:01.000')
tdSql.checkData(1, 1, 20)
- sqlString = "select * from " + newDbName + ".ctb1"
+ sqlString = "select * from " + newDbName + ".ctb1"
tdSql.query(sqlString)
tdSql.checkData(0, 0, '2021-04-01 08:00:00.000')
tdSql.checkData(0, 1, 11)
tdSql.checkData(1, 0, '2021-04-01 08:00:01.000')
tdSql.checkData(1, 1, 21)
-
+
keyDict['s'] = "\"select * from " + newDbName + ".ctb0\""
retCode = taos_command(buildPath, "s", keyDict['s'], "2021-04-01 08:00:01.000", keyDict['c'], '', '', '')
if retCode != "TAOS_OK":
tdLog.exit("taos -r show fail")
-
+
tdLog.printNoPrefix("================================ parameter: -r")
keyDict['s'] = "\"select * from " + newDbName + ".ctb0\""
retCode = taos_command(buildPath, "s", keyDict['s'], "1617235200000", keyDict['c'], '', 'r', '')
@@ -287,9 +287,9 @@ class TDTestCase:
retCode = taos_command(buildPath, "s", keyDict['s'], "1617235201000", keyDict['c'], '', 'r', '')
if retCode != "TAOS_OK":
tdLog.exit("taos -r show fail")
-
+
tdSql.query('drop database %s'%newDbName)
-
+
tdLog.printNoPrefix("================================ parameter: -f")
pwd=os.getcwd()
newDbName="dbf"
@@ -298,15 +298,15 @@ class TDTestCase:
sql2 = "echo use " + newDbName + " >> " + sqlFile
if platform.system().lower() == 'windows':
sql3 = "echo create table ntbf (ts timestamp, c binary(40)) >> " + sqlFile
- sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
+ sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
else:
sql3 = "echo 'create table ntbf (ts timestamp, c binary(40))' >> " + sqlFile
- sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
- sql5 = "echo show databases >> " + sqlFile
- os.system(sql1)
- os.system(sql2)
- os.system(sql3)
- os.system(sql4)
+ sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
+ sql5 = "echo show databases >> " + sqlFile
+ os.system(sql1)
+ os.system(sql2)
+ os.system(sql3)
+ os.system(sql4)
os.system(sql5)
keyDict['f'] = pwd + "/0-others/sql.txt"
@@ -316,7 +316,7 @@ class TDTestCase:
tdLog.exit("taos -f fail")
print ("========== check new db ==========")
- tdSql.query("show databases")
+ tdSql.query("show databases")
for i in range(tdSql.queryRows):
#print ("dbseq: %d, dbname: %s"%(i, tdSql.getData(i, 0)))
if tdSql.getData(i, 0) == newDbName:
@@ -324,13 +324,13 @@ class TDTestCase:
else:
tdLog.exit("create db fail after taos -f fail")
- sqlString = "select * from " + newDbName + ".ntbf"
+ sqlString = "select * from " + newDbName + ".ntbf"
tdSql.query(sqlString)
tdSql.checkData(0, 0, '2021-04-01 08:00:00.000')
tdSql.checkData(0, 1, 'test taos -f1')
tdSql.checkData(1, 0, '2021-04-01 08:00:01.000')
tdSql.checkData(1, 1, 'test taos -f2')
-
+
shellCmd = "rm -f " + sqlFile
os.system(shellCmd)
tdSql.query('drop database %s'%newDbName)
@@ -345,9 +345,9 @@ class TDTestCase:
#print ("-C return content:\n ", retVal)
totalCfgItem = {"firstEp":['', '', ''], }
for line in retVal.splitlines():
- strList = line.split()
+ strList = line.split()
if (len(strList) > 2):
- totalCfgItem[strList[1]] = strList
+ totalCfgItem[strList[1]] = strList
#print ("dict content:\n ", totalCfgItem)
firstEp = keyDict["h"] + ':' + keyDict['P']
@@ -356,8 +356,8 @@ class TDTestCase:
if (totalCfgItem["rpcDebugFlag"][2] != self.rpcDebugFlagVal) and (totalCfgItem["rpcDebugFlag"][0] != 'cfg_file'):
tdLog.exit("taos -C return rpcDebugFlag error!")
-
- count = os.cpu_count()
+
+ count = os.cpu_count()
if (totalCfgItem["numOfCores"][2] != count) and (totalCfgItem["numOfCores"][0] != 'default'):
tdLog.exit("taos -C return numOfCores error!")
diff --git a/tests/system-test/0-others/taosShellError.py b/tests/system-test/0-others/taosShellError.py
index 5735e55c03..8666c2e54d 100644
--- a/tests/system-test/0-others/taosShellError.py
+++ b/tests/system-test/0-others/taosShellError.py
@@ -18,7 +18,7 @@ from util.dnodes import *
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
if len(key) == 0:
tdLog.exit("taos test key is null!")
-
+
if platform.system().lower() == 'windows':
taosCmd = buildPath + '\\build\\bin\\taos.exe '
taosCmd = taosCmd.replace('\\','\\\\')
@@ -231,7 +231,7 @@ class TDTestCase:
tdLog.info("taos -P %s test success"%keyDict['P'])
else:
tdLog.exit("taos -P %s fail"%keyDict['P'])
-
+
tdLog.printNoPrefix("================================ parameter: -f with error sql ")
pwd=os.getcwd()
newDbName="dbf"
@@ -240,15 +240,15 @@ class TDTestCase:
sql2 = "echo use " + newDbName + " >> " + sqlFile
if platform.system().lower() == 'windows':
sql3 = "echo create table ntbf (ts timestamp, c binary(40)) no this item >> " + sqlFile
- sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
+ sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
else:
sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile
- sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
- sql5 = "echo show databases >> " + sqlFile
- os.system(sql1)
- os.system(sql2)
- os.system(sql3)
- os.system(sql4)
+ sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
+ sql5 = "echo show databases >> " + sqlFile
+ os.system(sql1)
+ os.system(sql2)
+ os.system(sql3)
+ os.system(sql4)
os.system(sql5)
keyDict['f'] = pwd + "/0-others/sql.txt"
@@ -258,7 +258,7 @@ class TDTestCase:
tdLog.exit("taos -f fail")
print ("========== check new db ==========")
- tdSql.query("show databases")
+ tdSql.query("show databases")
for i in range(tdSql.queryRows):
#print ("dbseq: %d, dbname: %s"%(i, tdSql.getData(i, 0)))
if tdSql.getData(i, 0) == newDbName:
@@ -266,9 +266,9 @@ class TDTestCase:
else:
tdLog.exit("create db fail after taos -f fail")
- sqlString = "select * from " + newDbName + ".ntbf"
+ sqlString = "select * from " + newDbName + ".ntbf"
tdSql.error(sqlString)
-
+
shellCmd = "rm -f " + sqlFile
os.system(shellCmd)
@@ -281,16 +281,16 @@ class TDTestCase:
tdSql.query('drop database %s'%newDbName)
tdLog.printNoPrefix("================================ parameter: -a with error value")
- #newDbName="dba"
- errorPassword = 'errorPassword'
+ #newDbName="dba"
+ errorPassword = 'errorPassword'
sqlString = 'create database ' + newDbName + ';'
retCode, retVal = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'a', errorPassword)
if retCode != "TAOS_FAIL":
tdLog.exit("taos -u %s -a %s"%(keyDict['u'], errorPassword))
tdLog.printNoPrefix("================================ parameter: -p with error value")
- #newDbName="dba"
- keyDict['p'] = 'errorPassword'
+ #newDbName="dba"
+ keyDict['p'] = 'errorPassword'
retCode, retVal = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'p', keyDict['p'])
if retCode == "TAOS_FAIL" and "Authentication failure" in retVal:
tdLog.info("taos -p %s test success"%keyDict['p'])
diff --git a/tests/system-test/0-others/taosShellNetChk.py b/tests/system-test/0-others/taosShellNetChk.py
index 22c9c8c0c5..dd44852d49 100644
--- a/tests/system-test/0-others/taosShellNetChk.py
+++ b/tests/system-test/0-others/taosShellNetChk.py
@@ -18,7 +18,7 @@ from util.dnodes import *
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
if len(key) == 0:
tdLog.exit("taos test key is null!")
-
+
if platform.system().lower() == 'windows':
taosCmd = buildPath + '\\build\\bin\\taos.exe '
taosCmd = taosCmd.replace('\\','\\\\')
@@ -158,34 +158,34 @@ class TDTestCase:
if "2: service ok" in retVal:
tdLog.info("taos -k success")
else:
- tdLog.info(retVal)
+ tdLog.info(retVal)
tdLog.exit("taos -k fail 1")
# stop taosd
tdDnodes.stop(1)
#sleep(10)
#tdDnodes.start(1)
- #sleep(5)
+ #sleep(5)
retCode, retVal = taos_command(buildPath, "k", '', "", keyDict['c'], sqlString)
if "0: unavailable" in retVal:
tdLog.info("taos -k success")
else:
- tdLog.info(retVal)
+ tdLog.info(retVal)
tdLog.exit("taos -k fail 2")
# restart taosd
tdDnodes.start(1)
- #sleep(5)
+ #sleep(5)
retCode, retVal = taos_command(buildPath, "k", '', "", keyDict['c'], sqlString)
if "2: service ok" in retVal:
tdLog.info("taos -k success")
else:
- tdLog.info(retVal)
+ tdLog.info(retVal)
tdLog.exit("taos -k fail 3")
tdLog.printNoPrefix("================================ parameter: -n")
# stop taosd
- tdDnodes.stop(1)
+ tdDnodes.stop(1)
try:
role = 'server'
@@ -220,7 +220,7 @@ class TDTestCase:
#print(child.after.decode())
if i == 0:
tdLog.exit('taos -n server fail!')
-
+
expectString1 = 'response is received, size:' + pktLen
expectSTring2 = pktNum + '/' + pktNum
if expectString1 in retResult and expectSTring2 in retResult:
diff --git a/tests/system-test/0-others/taosdMonitor.py b/tests/system-test/0-others/taosdMonitor.py
index 4c5a434f0c..4466c4a854 100644
--- a/tests/system-test/0-others/taosdMonitor.py
+++ b/tests/system-test/0-others/taosdMonitor.py
@@ -51,7 +51,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "version" not in infoDict["cluster_info"] or infoDict["cluster_info"]["version"] == None:
tdLog.exit("first_ep_dnode_id is null!")
-
+
if "master_uptime" not in infoDict["cluster_info"] or infoDict["cluster_info"]["master_uptime"] == None:
tdLog.exit("master_uptime is null!")
@@ -69,13 +69,13 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "dnodes" not in infoDict["cluster_info"] or infoDict["cluster_info"]["dnodes"] == None :
tdLog.exit("dnodes is null!")
-
+
dnodes_info = { "dnode_id": 1,"dnode_ep": self.hostPort,"status":"ready"}
-
+
for k ,v in dnodes_info.items():
if k not in infoDict["cluster_info"]["dnodes"][0] or v != infoDict["cluster_info"]["dnodes"][0][k] :
tdLog.exit("dnodes info is null!")
-
+
mnodes_info = { "mnode_id":1, "mnode_ep": self.hostPort,"role": "leader" }
for k ,v in mnodes_info.items():
@@ -86,7 +86,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "vgroup_infos" not in infoDict or infoDict["vgroup_infos"]== None:
tdLog.exit("vgroup_infos is null!")
-
+
vgroup_infos_nums = len(infoDict["vgroup_infos"])
for index in range(vgroup_infos_nums):
@@ -116,14 +116,14 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "timeseries_total" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_total"] > 0:
tdLog.exit("timeseries_total is null!")
-
+
# dnode_info ====================================
if "dnode_info" not in infoDict or infoDict["dnode_info"]== None:
tdLog.exit("dnode_info is null!")
- dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine',
- 'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select',
+ dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine',
+ 'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select',
'req_select_rate', 'req_insert', 'req_insert_success', 'req_insert_rate', 'req_insert_batch', 'req_insert_batch_success',
'req_insert_batch_rate', 'errors', 'vnodes_num', 'masters', 'has_mnode', 'has_qnode', 'has_snode', 'has_bnode']
for elem in dnode_infos:
@@ -134,7 +134,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "disk_infos" not in infoDict or infoDict["disk_infos"]== None:
tdLog.exit("disk_infos is null!")
-
+
# bug for data_dir
if "datadir" not in infoDict["disk_infos"] or len(infoDict["disk_infos"]["datadir"]) <=0 :
tdLog.exit("datadir is null!")
@@ -187,7 +187,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
# log_infos ====================================
-
+
if "log_infos" not in infoDict or infoDict["log_infos"]== None:
tdLog.exit("log_infos is null!")
@@ -206,13 +206,13 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if "summary" not in infoDict["log_infos"] or len(infoDict["log_infos"]["summary"])!= 4:
tdLog.exit("summary is null!")
-
+
if "total" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["total"] < 0 :
tdLog.exit("total is null!")
if "level" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]:
tdLog.exit("level is null!")
-
+
def do_GET(self):
"""
process GET request
@@ -227,25 +227,25 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if contentEncoding == 'gzip':
req_body = self.rfile.read(int(self.headers["Content-Length"]))
plainText = gzip.decompress(req_body).decode()
- else:
+ else:
plainText = self.rfile.read(int(self.headers["Content-Length"])).decode()
-
+
print(plainText)
# 1. send response code and header
- self.send_response(200)
+ self.send_response(200)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.end_headers()
-
+
# 2. send response content
#self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8"))
-
+
# 3. check request body info
infoDict = json.loads(plainText)
#print("================")
# print(infoDict)
self.telemetryInfoCheck(infoDict)
- # 4. shutdown the server and exit case
+ # 4. shutdown the server and exit case
assassin = threading.Thread(target=self.server.shutdown)
assassin.daemon = True
assassin.start()
@@ -287,7 +287,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
+
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
# time.sleep(2)
diff --git a/tests/system-test/0-others/telemetry.py b/tests/system-test/0-others/telemetry.py
index 4483e113bf..812b8b40c5 100644
--- a/tests/system-test/0-others/telemetry.py
+++ b/tests/system-test/0-others/telemetry.py
@@ -100,9 +100,9 @@ def telemetryInfoCheck(infoDict=''):
if "compStorage" not in infoDict or infoDict["compStorage"] < 0:
tdLog.exit("compStorage is null!")
-
-class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
+
+class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
def do_GET(self):
"""
process GET request
@@ -117,26 +117,26 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
if contentEncoding == 'gzip':
req_body = self.rfile.read(int(self.headers["Content-Length"]))
plainText = gzip.decompress(req_body).decode()
- else:
+ else:
plainText = self.rfile.read(int(self.headers["Content-Length"])).decode()
print("monitor info:\n%s"%plainText)
# 1. send response code and header
- self.send_response(200)
+ self.send_response(200)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.end_headers()
-
+
# 2. send response content
#self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8"))
-
+
# 3. check request body info
infoDict = json.loads(plainText)
#print("================")
#print(infoDict)
telemetryInfoCheck(infoDict)
- # 4. shutdown the server and exit case
+ # 4. shutdown the server and exit case
assassin = threading.Thread(target=self.server.shutdown)
assassin.daemon = True
assassin.start()
@@ -176,7 +176,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
+
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
# time.sleep(2)
diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py
index ddbbd9b2de..c4c40348b8 100644
--- a/tests/system-test/0-others/udfTest.py
+++ b/tests/system-test/0-others/udfTest.py
@@ -512,7 +512,7 @@ class TDTestCase:
"select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
"select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
"select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
- "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
+ "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
]
udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf2(c1) from stb1 group by 1-udf1(c1)" ,
diff --git a/tests/system-test/0-others/udf_cfg1.py b/tests/system-test/0-others/udf_cfg1.py
index fc9265617d..e6ab57b488 100644
--- a/tests/system-test/0-others/udf_cfg1.py
+++ b/tests/system-test/0-others/udf_cfg1.py
@@ -190,7 +190,7 @@ class TDTestCase:
tdSql.execute("use db ")
tdSql.error("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
tdSql.error("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
-
+
# aggregate functions
tdSql.error("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
diff --git a/tests/system-test/0-others/udf_cfg2.py b/tests/system-test/0-others/udf_cfg2.py
index 07f83b1455..3f8ba37491 100644
--- a/tests/system-test/0-others/udf_cfg2.py
+++ b/tests/system-test/0-others/udf_cfg2.py
@@ -514,7 +514,7 @@ class TDTestCase:
"select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
"select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
"select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
- "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
+ "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
]
udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf2(c1) from stb1 group by 1-udf1(c1)" ,
diff --git a/tests/system-test/0-others/udf_cluster.py b/tests/system-test/0-others/udf_cluster.py
index 9ef3137a7e..1ca1738332 100644
--- a/tests/system-test/0-others/udf_cluster.py
+++ b/tests/system-test/0-others/udf_cluster.py
@@ -1,7 +1,7 @@
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -16,7 +16,7 @@ class MyDnodes(TDDnodes):
super(MyDnodes,self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
-
+
class TDTestCase:
def init(self,conn ,logSql):
@@ -26,7 +26,7 @@ class TDTestCase:
self.master_dnode = self.TDDnodes.dnodes[0]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -43,7 +43,7 @@ class TDTestCase:
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
-
+
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -61,7 +61,7 @@ class TDTestCase:
def prepare_data(self):
-
+
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db replica 1 duration 300")
tdSql.execute("use db")
@@ -71,7 +71,7 @@ class TDTestCase:
tags (t1 int)
'''
)
-
+
tdSql.execute(
'''
create table t1
@@ -142,7 +142,7 @@ class TDTestCase:
# create aggregate functions
tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")
-
+
# functions = tdSql.getResult("show functions")
# function_nums = len(functions)
# if function_nums == 2:
@@ -167,14 +167,14 @@ class TDTestCase:
# create aggregate functions
tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")
-
+
functions = tdSql.getResult("show functions")
function_nums = len(functions)
if function_nums == 2:
tdLog.info("create two udf functions success ")
-
+
def basic_udf_query(self , dnode):
-
+
mytdSql = self.getConnection(dnode)
# scalar functions
@@ -229,7 +229,7 @@ class TDTestCase:
else:
tdLog.info(" UDF query check failed at :dnode_index %s" %dnode.index)
tdLog.exit("query check failed at :dnode_index %s" %dnode.index )
-
+
def check_UDF_query(self):
@@ -238,10 +238,10 @@ class TDTestCase:
self.basic_udf_query(dnode)
- def depoly_cluster(self ,dnodes_nums):
+ def depoly_cluster(self ,dnodes_nums):
testCluster = False
- valgrind = 0
+ valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
@@ -253,7 +253,7 @@ class TDTestCase:
dnode.addExtraCfg("monitorFqdn", hostname)
dnode.addExtraCfg("monitorPort", 7043)
dnodes.append(dnode)
-
+
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
@@ -261,11 +261,11 @@ class TDTestCase:
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
-
+
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.start(dnode.index)
- # create cluster
+ # create cluster
for dnode in self.TDDnodes.dnodes:
print(dnode.cfgDict)
@@ -275,12 +275,12 @@ class TDTestCase:
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
print(cmd)
os.system(cmd)
-
+
time.sleep(2)
tdLog.info(" create cluster done! ")
-
-
+
+
def getConnection(self, dnode):
host = dnode.cfgDict["fqdn"]
port = dnode.cfgDict["serverPort"]
@@ -288,23 +288,23 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
def restart_udfd(self, dnode):
-
+
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
-
+
cfgPath = dnode.cfgDir
-
+
udfdPath = buildPath +'/build/bin/udfd'
for i in range(5):
tdLog.info(" loop restart udfd %d_th at dnode_index : %s" % (i ,dnode.index))
self.basic_udf_query(dnode)
- # stop udfd cmds
+ # stop udfd cmds
get_processID = "ps -ef | grep -w udfd | grep %s | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}'"%cfgPath
processID = subprocess.check_output(get_processID, shell=True).decode("utf-8")
stop_udfd = " kill -9 %s" % processID
@@ -327,12 +327,12 @@ class TDTestCase:
# self.check_UDF_query()
self.restart_udfd(self.master_dnode)
# self.test_restart_udfd_All_dnodes()
-
-
+
+
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/0-others/udf_create.py b/tests/system-test/0-others/udf_create.py
index e53ed651f0..788202eb1b 100644
--- a/tests/system-test/0-others/udf_create.py
+++ b/tests/system-test/0-others/udf_create.py
@@ -514,7 +514,7 @@ class TDTestCase:
"select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
"select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
"select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
- "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
+ "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
]
udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
"select udf2(c1) from stb1 group by 1-udf1(c1)" ,
diff --git a/tests/system-test/1-insert/delete_data.py b/tests/system-test/1-insert/delete_data.py
index 1eb270d997..068d212ac4 100644
--- a/tests/system-test/1-insert/delete_data.py
+++ b/tests/system-test/1-insert/delete_data.py
@@ -50,9 +50,9 @@ class TDTestCase:
'col11': 'bool',
'col12': f'binary({self.str_length})',
'col13': f'nchar({self.str_length})',
-
+
}
-
+
self.tinyint_val = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX)
self.smallint_val = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX)
self.int_val = random.randint(constant.INT_MIN,constant.INT_MAX)
@@ -100,15 +100,15 @@ class TDTestCase:
elif col_type.lower() == 'bigint unsigned':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint unsigned"]})')
elif col_type.lower() == 'bool':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bool"]})')
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bool"]})')
elif col_type.lower() == 'float':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["float"]})')
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["float"]})')
elif col_type.lower() == 'double':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["double"]})')
elif 'binary' in col_type.lower():
tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['binary']}")''')
elif 'nchar' in col_type.lower():
- tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['nchar']}")''')
+ tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['nchar']}")''')
def delete_all_data(self,tbname,col_type,row_num,base_data,dbname,tb_type,tb_num=1):
tdSql.execute(f'delete from {tbname}')
tdSql.execute(f'flush database {dbname}')
@@ -164,7 +164,7 @@ class TDTestCase:
elif 'nchar' in column_type.lower():
tdSql.checkEqual(tdSql.queryResult[0][0],base_data['nchar'])
else:
- tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type])
+ tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type])
def delete_rows(self,dbname,tbname,col_name,col_type,base_data,row_num,tb_type,tb_num=1):
for i in range(row_num):
tdSql.execute(f'delete from {tbname} where ts>{self.ts+i}')
@@ -189,7 +189,7 @@ class TDTestCase:
elif tb_type == 'stb':
tdSql.checkRows(i*tb_num)
for j in range(tb_num):
- self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+ self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
for i in range(row_num):
tdSql.execute(f'delete from {tbname} where ts<={self.ts+i}')
tdSql.execute(f'flush database {dbname}')
@@ -240,7 +240,7 @@ class TDTestCase:
tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['nchar']}"''')
else:
tdSql.error(f'delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}')
-
+
def delete_data_ntb(self):
tdSql.execute(f'create database if not exists {self.dbname}')
tdSql.execute(f'use {self.dbname}')
@@ -295,4 +295,4 @@ class TDTestCase:
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
index 10e81892ef..25e2378f46 100644
--- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py
+++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
@@ -26,13 +26,13 @@ from util.common import tdCom
import platform
import io
if platform.system().lower() == 'windows':
- sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
+ sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
- self._conn = conn
+ self._conn = conn
def createDb(self, name="test", db_update_tag=0):
if db_update_tag == 0:
@@ -67,7 +67,7 @@ class TDTestCase:
td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts))
return td_ts
#return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f"))
-
+
def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
@@ -274,7 +274,7 @@ class TDTestCase:
input_sql = self.gen_influxdb_line(stb_name, tb_name, id, t0, t1, t2, t3, t4, t5, t6, t7, t8, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, ts,
id_noexist_tag, id_change_tag, id_double_tag, ct_add_tag, ct_am_tag, ct_ma_tag, ct_min_tag, c_multi_tag, t_multi_tag, c_blank_tag, t_blank_tag, chinese_tag)
return input_sql, stb_name
-
+
def genMulTagColStr(self, gen_type, count):
"""
gen_type must be "tag"/"col"
@@ -370,10 +370,10 @@ class TDTestCase:
for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type)
self.resCmp(input_sql, stb_name)
-
+
def symbolsCheckCase(self):
"""
- check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
+ check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
"""
'''
please test :
@@ -395,7 +395,7 @@ class TDTestCase:
for ts in ts_list:
input_sql, stb_name = self.genFullTypeSql(ts=ts)
self.resCmp(input_sql, stb_name, ts=ts)
-
+
def idSeqCheckCase(self):
"""
check id.index in tags
@@ -404,7 +404,7 @@ class TDTestCase:
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True)
self.resCmp(input_sql, stb_name)
-
+
def idUpperCheckCase(self):
"""
check id param
@@ -444,7 +444,7 @@ class TDTestCase:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def idIllegalNameCheckCase(self):
"""
test illegal id name
@@ -490,7 +490,7 @@ class TDTestCase:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def illegalTsCheckCase(self):
"""
check ts format like 16260068336390us19
@@ -575,11 +575,11 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- # binary
+ # binary
stb_name = tdCom.getLongName(7, "letters")
input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}" c0=f 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-
+
input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16375, "letters")}" c0=f 1626006833639000000'
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -647,7 +647,7 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- # f32
+ # f32
for c5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
input_sql, stb_name = self.genFullTypeSql(c5=c5)
self.resCmp(input_sql, stb_name)
@@ -671,11 +671,11 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- # # # binary
+ # # # binary
# stb_name = tdCom.getLongName(7, "letters")
# input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}" 1626006833639000000'
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-
+
# input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16375, "letters")}" 1626006833639000000'
# try:
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -715,13 +715,13 @@ class TDTestCase:
# i8 i16 i32 i64 f32 f64
for input_sql in [
- self.genFullTypeSql(t1="1s2i8")[0],
+ self.genFullTypeSql(t1="1s2i8")[0],
self.genFullTypeSql(t2="1s2i16")[0],
self.genFullTypeSql(t3="1s2i32")[0],
self.genFullTypeSql(t4="1s2i64")[0],
self.genFullTypeSql(t5="11.1s45f32")[0],
- self.genFullTypeSql(t6="11.1s45f64")[0],
- self.genFullTypeSql(c1="1s2i8")[0],
+ self.genFullTypeSql(t6="11.1s45f64")[0],
+ self.genFullTypeSql(c1="1s2i8")[0],
self.genFullTypeSql(c2="1s2i16")[0],
self.genFullTypeSql(c3="1s2i32")[0],
self.genFullTypeSql(c4="1s2i64")[0],
@@ -746,14 +746,14 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- # check accepted binary and nchar symbols
+ # check accepted binary and nchar symbols
# # * ~!@#$¥%^&*()-+={}|[]、「」:;
for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
input_sql1 = f'{stb_name},t0=t c0=f,c1="abc{symbol}aaa" 1626006833639000000'
input_sql2 = f'{stb_name},t0=t,t1="abc{symbol}aaa" c0=f 1626006833639000000'
self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
# self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-
+
def duplicateIdTagColInsertCheckCase(self):
"""
check duplicate Id Tag Col
@@ -810,7 +810,7 @@ class TDTestCase:
self.resCmp(input_sql, stb_name)
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
self.resCmp(input_sql, stb_name)
-
+
@tdCom.smlPass
def tagColBinaryNcharLengthCheckCase(self):
"""
@@ -829,7 +829,7 @@ class TDTestCase:
check column and tag count add, stb and tb duplicate
* tag: alter table ...
* col: when update==0 and ts is same, unchange
- * so this case tag&&value will be added,
+ * so this case tag&&value will be added,
* col is added without value when update==0
* col is added with value when update==1
"""
@@ -897,7 +897,7 @@ class TDTestCase:
# * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2
input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(5, "letters")}" c0=f 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-
+
tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(2)
input_sql = f'{stb_name},t0=t,t1="{tdCom.getLongName(16374, "letters")}",t2="{tdCom.getLongName(6, "letters")}" c0=f 1626006833639000000'
@@ -922,7 +922,7 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(3)
-
+
# * tag nchar max is 16374/4, col+ts nchar max 49151
def tagColNcharMaxLengthCheckCase(self):
"""
@@ -977,7 +977,7 @@ class TDTestCase:
"st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000"
]
self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-
+
def multiInsertCheckCase(self, count):
"""
test multi insert
@@ -1073,7 +1073,7 @@ class TDTestCase:
self.multiThreadRun(self.genMultiThreadSeq(input_sql))
tdSql.query(f"show tables;")
tdSql.checkRows(5)
-
+
def sStbStbDdataInsertMultiThreadCheckCase(self):
"""
thread input same stb tb, different data, result keep first data
@@ -1107,7 +1107,7 @@ class TDTestCase:
tdSql.checkEqual(tb_name, expected_tb_name)
tdSql.query(f"select * from {stb_name};")
tdSql.checkRows(1)
-
+
def sStbStbDdataMtcInsertMultiThreadCheckCase(self):
"""
thread input same stb tb, different data, minus columes and tags, result keep first data
@@ -1217,7 +1217,7 @@ class TDTestCase:
tdSql.checkRows(6)
for c in ["c7", "c8", "c9"]:
tdSql.query(f"select * from {stb_name} where {c} is NULL")
- tdSql.checkRows(5)
+ tdSql.checkRows(5)
for t in ["t10", "t11"]:
tdSql.query(f"select * from {stb_name} where {t} is not NULL;")
tdSql.checkRows(6)
diff --git a/tests/system-test/1-insert/insertWithMoreVgroup.py b/tests/system-test/1-insert/insertWithMoreVgroup.py
index 80468509ee..aea0feda02 100644
--- a/tests/system-test/1-insert/insertWithMoreVgroup.py
+++ b/tests/system-test/1-insert/insertWithMoreVgroup.py
@@ -43,7 +43,7 @@ class TDTestCase:
case1: limit offset base function test
case2: offset return valid
'''
- return
+ return
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -69,7 +69,7 @@ class TDTestCase:
# self.create_tables();
self.ts = 1500000000000
- # stop
+ # stop
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
@@ -80,7 +80,7 @@ class TDTestCase:
def newcur(self,host,cfg):
user = "root"
password = "taosdata"
- port =6030
+ port =6030
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
cur=con.cursor()
print(cur)
@@ -90,7 +90,7 @@ class TDTestCase:
def create_tables(self,host,dbname,stbname,count):
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
-
+
tsql=self.newcur(host,config)
tsql.execute("use %s" %dbname)
@@ -109,7 +109,7 @@ class TDTestCase:
tsql.execute(sql)
sql = pre_create
# print(time.time())
- # end sql
+ # end sql
if sql != pre_create:
# print(sql)
tsql.execute(sql)
@@ -122,7 +122,7 @@ class TDTestCase:
def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,childcount):
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
-
+
tsql=self.newcur(host,config)
tdLog.debug("create database %s"%dbname)
tsql.execute("drop database if exists %s"%dbname)
@@ -132,7 +132,7 @@ class TDTestCase:
threads = []
for i in range(threadNumbers):
tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
- threads.append(thd.Thread(target=self.create_tables, args=(host, dbname, stbname+"%d"%i, count,)))
+ threads.append(thd.Thread(target=self.create_tables, args=(host, dbname, stbname+"%d"%i, count,)))
start_time = time.time()
for tr in threads:
tr.start()
@@ -142,7 +142,7 @@ class TDTestCase:
spendTime=end_time-start_time
speedCreate=threadNumbers*count/spendTime
tdLog.debug("spent %.2fs to create %d stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,threadNumbers,threadNumbers*count,speedCreate))
-
+
return
# def create_tables(self,host,dbname,stbname,vgroups,tcountStart,tcountStop):
@@ -169,7 +169,7 @@ class TDTestCase:
# print(sql)
tsql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i)
- # end sql
+ # end sql
if sql != pre_insert:
# print(sql)
print(len(sql))
@@ -184,7 +184,7 @@ class TDTestCase:
def mutiThread_insert_data(self, host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount):
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
-
+
tsql=self.newcur(host,config)
tdLog.debug("ready to inser data")
@@ -193,7 +193,7 @@ class TDTestCase:
threads = []
for i in range(threadNumbers):
# tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i))
- threads.append(thd.Thread(target=self.insert_data, args=(host, dbname, stbname+"%d"%i, chilCount, ts_start, childrowcount,)))
+ threads.append(thd.Thread(target=self.insert_data, args=(host, dbname, stbname+"%d"%i, chilCount, ts_start, childrowcount,)))
start_time = time.time()
for tr in threads:
tr.start()
@@ -224,10 +224,10 @@ class TDTestCase:
tdLog.info("taosd found in %s" % buildPath)
taosBenchbin = buildPath+ "/build/bin/taosBenchmark"
os.system("%s -f %s -y " %(taosBenchbin,jsonFile))
-
+
return
def taosBenchCreate(self,host,dropdb,dbname,stbname,vgroups,processNumbers,count):
-
+
# count=50000
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
@@ -241,7 +241,7 @@ class TDTestCase:
# tsql.getResult("show databases")
# print(tdSql.queryResult)
tsql.execute("use %s" %dbname)
-
+
threads = []
for i in range(processNumbers):
jsonfile="1-insert/Vgroups%d%d.json"%(vgroups,i)
@@ -252,7 +252,7 @@ class TDTestCase:
os.system("sed -i 's/\"childtable_count\": 10000,/\"childtable_count\": %d,/g' %s "%(count,jsonfile))
os.system("sed -i 's/\"name\": \"stb1\",/\"name\": \"%s%d\",/g' %s "%(stbname,i,jsonfile))
os.system("sed -i 's/\"childtable_prefix\": \"stb1_\",/\"childtable_prefix\": \"%s%d_\",/g' %s "%(stbname,i,jsonfile))
- threads.append(mp.Process(target=self.taosBench, args=("%s"%jsonfile,)))
+ threads.append(mp.Process(target=self.taosBench, args=("%s"%jsonfile,)))
start_time = time.time()
for tr in threads:
tr.start()
@@ -274,10 +274,10 @@ class TDTestCase:
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
- return
-
-
- # test case1 base
+ return
+
+
+ # test case1 base
def test_case1(self):
#stableCount=threadNumbersCtb
parameterDict = {'vgroups': 1, \
@@ -290,22 +290,22 @@ class TDTestCase:
'stbname': 'stb', \
'host': 'localhost', \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
-
+
tdLog.debug("-----create database and muti-thread create tables test------- ")
#host,dbname,stbname,vgroups,threadNumbers,tcountStart,tcountStop
#host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount
self.mutiThread_create_tables(
host=parameterDict['host'],
dbname=parameterDict['dbname'],
- stbname=parameterDict['stbname'],
- vgroups=parameterDict['vgroups'],
- threadNumbers=parameterDict['threadNumbersCtb'],
+ stbname=parameterDict['stbname'],
+ vgroups=parameterDict['vgroups'],
+ threadNumbers=parameterDict['threadNumbersCtb'],
childcount=parameterDict['tablesPerStb'])
self.mutiThread_insert_data(
host=parameterDict['host'],
dbname=parameterDict['dbname'],
- stbname=parameterDict['stbname'],
+ stbname=parameterDict['stbname'],
threadNumbers=parameterDict['threadNumbersIda'],
chilCount=parameterDict['tablesPerStb'],
ts_start=parameterDict['startTs'],
@@ -315,7 +315,7 @@ class TDTestCase:
rowsPerStable=parameterDict['rowsPerTable']*parameterDict['tablesPerStb']
self.checkData(dbname=parameterDict['dbname'],stbname=parameterDict['stbname'], stableCount=parameterDict['threadNumbersCtb'],CtableCount=tableCount,rowsPerSTable=rowsPerStable)
-
+
def test_case3(self):
#stableCount=threadNumbersCtb
parameterDict = {'vgroups': 1, \
@@ -327,21 +327,21 @@ class TDTestCase:
'stbname': 'stb1', \
'host': 'localhost', \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
-
+
self.taosBenchCreate(
parameterDict['host'],
"no",
- parameterDict['dbname'],
- parameterDict['stbname'],
- parameterDict['vgroups'],
- parameterDict['threadNumbersCtb'],
+ parameterDict['dbname'],
+ parameterDict['stbname'],
+ parameterDict['vgroups'],
+ parameterDict['threadNumbersCtb'],
parameterDict['tablesPerStb'])
tableCount=parameterDict['threadNumbersCtb']*parameterDict['tablesPerStb']
rowsPerStable=parameterDict['rowsPerTable']*parameterDict['tablesPerStb']
self.checkData(
dbname=parameterDict['dbname'],
- stbname=parameterDict['stbname'],
+ stbname=parameterDict['stbname'],
stableCount=parameterDict['threadNumbersCtb'],
CtableCount=tableCount,
rowsPerSTable=rowsPerStable)
@@ -353,9 +353,9 @@ class TDTestCase:
# self.taosBenchCreate("db1", "stb1", 4, 5, 100*10000)
# self.taosBenchCreate("db1", "stb1", 1, 5, 100*10000)
- return
+ return
- # run case
+ # run case
def run(self):
# create database and tables。
@@ -368,7 +368,7 @@ class TDTestCase:
- return
+ return
#
# add case with filename
#
diff --git a/tests/system-test/1-insert/insert_drop.py b/tests/system-test/1-insert/insert_drop.py
index 5ccaa5540b..bfba4c9e4a 100644
--- a/tests/system-test/1-insert/insert_drop.py
+++ b/tests/system-test/1-insert/insert_drop.py
@@ -38,7 +38,7 @@ class TDTestCase:
tlist = self.genMultiThreadSeq(sql_list)
self.multiThreadRun(tlist)
tdSql.query(f'show databases')
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/1-insert/mutipythonnodebugtaosd.py b/tests/system-test/1-insert/mutipythonnodebugtaosd.py
index 3d6358f3ff..8aea53cd92 100644
--- a/tests/system-test/1-insert/mutipythonnodebugtaosd.py
+++ b/tests/system-test/1-insert/mutipythonnodebugtaosd.py
@@ -34,14 +34,14 @@ class TDTestCase:
#
# --------------- main frame -------------------
#
-
+
def caseDescription(self):
'''
limit and offset keyword function test cases;
case1: limit offset base function test
case2: offset return valid
'''
- return
+ return
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -68,7 +68,7 @@ class TDTestCase:
self.ts = 1500000000000
- # run case
+ # run case
def run(self):
# test base case
@@ -79,7 +79,7 @@ class TDTestCase:
# self.test_case2()
# tdLog.debug(" LIMIT test_case2 ............ [OK]")
- # stop
+ # stop
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
@@ -101,7 +101,7 @@ class TDTestCase:
tdSql.execute(sql)
sql = pre_create
# print(time.time())
- # end sql
+ # end sql
if sql != pre_create:
tdSql.execute(sql)
exeEndTime=time.time()
@@ -113,7 +113,7 @@ class TDTestCase:
def newcur(self,host,cfg):
user = "root"
password = "taosdata"
- port =6030
+ port =6030
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
cur=con.cursor()
print(cur)
@@ -123,7 +123,7 @@ class TDTestCase:
host = "127.0.0.1"
buildPath = self.getBuildPath()
config = buildPath+ "../sim/dnode1/cfg/"
-
+
tsql=self.newcur(host,config)
tsql.execute("drop database if exists %s" %(dbname))
tsql.execute("create database if not exists %s vgroups %d"%(dbname,vgroups))
@@ -147,7 +147,7 @@ class TDTestCase:
tsql.execute(sql)
sql = pre_create
# print(time.time())
- # end sql
+ # end sql
if sql != pre_create:
# print(sql)
tsql.execute(sql)
@@ -176,7 +176,7 @@ class TDTestCase:
# print(sql)
tdSql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i)
- # end sql
+ # end sql
if sql != pre_insert:
# print(sql)
tdSql.execute(sql)
@@ -189,7 +189,7 @@ class TDTestCase:
return
- # test case1 base
+ # test case1 base
def test_case1(self):
tdLog.debug("-----create database and tables test------- ")
# tdSql.execute("drop database if exists db1")
@@ -220,7 +220,7 @@ class TDTestCase:
threads = []
threadNumbers=2
for i in range(threadNumbers):
- threads.append(mp.Process(target=self.new_create_tables, args=("db1%d"%i, vgroups, "stb1", 0,count,)))
+ threads.append(mp.Process(target=self.new_create_tables, args=("db1%d"%i, vgroups, "stb1", 0,count,)))
start_time = time.time()
for tr in threads:
tr.start()
@@ -247,7 +247,7 @@ class TDTestCase:
# tdSql.execute("create database db16 vgroups 16")
# self.create_tables("db16", "stb16", 30*10000)
- return
+ return
# test case2 base:insert data
def test_case2(self):
@@ -266,7 +266,7 @@ class TDTestCase:
tdSql.execute("create database db1 vgroups 1")
self.create_tables("db1", "stb1", 1*100)
self.insert_data("db1", "stb1", self.ts, 1*50,1*10000)
-
+
tdSql.execute("create database db4 vgroups 4")
self.create_tables("db4", "stb4", 1*100)
@@ -287,7 +287,7 @@ class TDTestCase:
tdSql.execute("create database db16 vgroups 16")
self.create_tables("db16", "stb16", 1*100)
self.insert_data("db16", "stb16", self.ts, 1*100,1*10000)
-
+
return
#
@@ -296,4 +296,4 @@ class TDTestCase:
# tdCases.addWindows(__file__, TDTestCase())
# tdCases.addLinux(__file__, TDTestCase())
case=TDTestCase()
-case.test_case1()
\ No newline at end of file
+case.test_case1()
diff --git a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
index f9bc5bbaf4..003abe9d10 100644
--- a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
+++ b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
@@ -27,13 +27,13 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
- self._conn = conn
+ self._conn = conn
self.defaultJSONStrType_value = "NCHAR"
def createDb(self, name="test", db_update_tag=0, protocol=None):
if protocol == "telnet-tcp":
name = "opentsdb_telnet"
-
+
if db_update_tag == 0:
tdSql.execute(f"drop database if exists {name}")
tdSql.execute(f"create database if not exists {name} precision 'ms' schemaless 1")
@@ -225,7 +225,7 @@ class TDTestCase:
def genTagValue(self, t0_type="bool", t0_value="", t1_type="tinyint", t1_value=127, t2_type="smallint", t2_value=32767,
t3_type="int", t3_value=2147483647, t4_type="bigint", t4_value=9223372036854775807,
- t5_type="float", t5_value=11.12345027923584, t6_type="double", t6_value=22.123456789,
+ t5_type="float", t5_value=11.12345027923584, t6_type="double", t6_value=22.123456789,
t7_type="binary", t7_value="binaryTagValue", t8_type="nchar", t8_value="ncharTagValue", value_type="obj"):
if t0_value == "":
t0_value = random.choice([True, False])
@@ -256,9 +256,9 @@ class TDTestCase:
}
return tag_value
- def genFullTypeJson(self, ts_value="", col_value="", tag_value="", stb_name="", tb_name="",
+ def genFullTypeJson(self, ts_value="", col_value="", tag_value="", stb_name="", tb_name="",
id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None,
- t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
+ t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
chinese_tag=None, multi_field_tag=None, point_trans_tag=None, value_type="obj"):
if value_type == "obj":
if stb_name == "":
@@ -370,7 +370,7 @@ class TDTestCase:
if point_trans_tag is not None:
sql_json = {"metric": ".point.trans.test", "timestamp": ts_value, "value": col_value, "tags": tag_value}
return sql_json, stb_name
-
+
def genMulTagColDict(self, genType, count=1, value_type="obj"):
"""
genType must be tag/col
@@ -479,10 +479,10 @@ class TDTestCase:
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def symbolsCheckCase(self, value_type="obj"):
"""
- check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
+ check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
"""
'''
please test :
@@ -492,9 +492,9 @@ class TDTestCase:
tdCom.cleanTb()
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = binary_symbols
- input_sql1, stb_name1 = self.genFullTypeJson(col_value=self.genTsColValue(value=binary_symbols, t_type="binary", value_type=value_type),
+ input_sql1, stb_name1 = self.genFullTypeJson(col_value=self.genTsColValue(value=binary_symbols, t_type="binary", value_type=value_type),
tag_value=self.genTagValue(t7_value=binary_symbols, t8_value=nchar_symbols, value_type=value_type))
- input_sql2, stb_name2 = self.genFullTypeJson(col_value=self.genTsColValue(value=nchar_symbols, t_type="nchar", value_type=value_type),
+ input_sql2, stb_name2 = self.genFullTypeJson(col_value=self.genTsColValue(value=nchar_symbols, t_type="nchar", value_type=value_type),
tag_value=self.genTagValue(t7_value=binary_symbols, t8_value=nchar_symbols, value_type=value_type))
self.resCmp(input_sql1, stb_name1)
self.resCmp(input_sql2, stb_name2)
@@ -574,7 +574,7 @@ class TDTestCase:
tdCom.cleanTb()
input_json, stb_name = self.genFullTypeJson(id_change_tag=True, value_type=value_type)
self.resCmp(input_json, stb_name)
-
+
def idLetterCheckCase(self, value_type="obj"):
"""
check id param
@@ -618,7 +618,7 @@ class TDTestCase:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def idIllegalNameCheckCase(self, value_type="obj"):
"""
test illegal id name
@@ -669,7 +669,7 @@ class TDTestCase:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def illegalTsCheckCase(self, value_type="obj"):
"""
check ts format like 16260068336390us19
@@ -726,7 +726,7 @@ class TDTestCase:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
+ tdSql.checkNotEqual(err.errno, 0)
def tagValueLengthCheckCase(self, value_type="obj"):
"""
@@ -770,11 +770,11 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- #i64
+ #i64
for t4 in [-9223372036854775807, 9223372036854775807]:
input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t4_value=t4, value_type=value_type))
self.resCmp(input_json, stb_name)
-
+
for t4 in [-9223372036854775808, 9223372036854775808]:
input_json = self.genFullTypeJson(tag_value=self.genTagValue(t4_value=t4))[0]
try:
@@ -809,7 +809,7 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
if value_type == "obj":
- # binary
+ # binary
stb_name = tdCom.getLongName(7, "letters")
input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}}}
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -895,7 +895,7 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- # i64
+ # i64
tdCom.cleanTb()
for value in [-9223372036854775808]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint", value_type=value_type))
@@ -912,7 +912,7 @@ class TDTestCase:
# except SchemalessError as err:
# tdSql.checkNotEqual(err.errno, 0)
- # f32
+ # f32
tdCom.cleanTb()
for value in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float", value_type=value_type))
@@ -943,12 +943,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# if value_type == "obj":
- # # binary
+ # # binary
# tdCom.cleanTb()
# stb_name = tdCom.getLongName(7, "letters")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
-
+
# tdCom.cleanTb()
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# try:
@@ -972,7 +972,7 @@ class TDTestCase:
# except SchemalessError as err:
# tdSql.checkNotEqual(err.errno, 0)
# elif value_type == "default":
- # # binary
+ # # binary
# tdCom.cleanTb()
# stb_name = tdCom.getLongName(7, "letters")
# if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
@@ -1010,12 +1010,12 @@ class TDTestCase:
# i8 i16 i32 i64 f32 f64
for input_json in [
- self.genFullTypeJson(tag_value=self.genTagValue(t1_value="1s2"))[0],
- self.genFullTypeJson(tag_value=self.genTagValue(t2_value="1s2"))[0],
- self.genFullTypeJson(tag_value=self.genTagValue(t3_value="1s2"))[0],
- self.genFullTypeJson(tag_value=self.genTagValue(t4_value="1s2"))[0],
- self.genFullTypeJson(tag_value=self.genTagValue(t5_value="11.1s45"))[0],
- self.genFullTypeJson(tag_value=self.genTagValue(t6_value="11.1s45"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t1_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t2_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t3_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t4_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t5_value="11.1s45"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t6_value="11.1s45"))[0],
]:
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1033,7 +1033,7 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- # check accepted binary and nchar symbols
+ # check accepted binary and nchar symbols
# # * ~!@#$¥%^&*()-+={}|[]、「」:;
for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
input_json1 = self.genFullTypeJson(col_value=self.genTsColValue(value=f"abc{symbol}aaa", t_type="binary", value_type=value_type))[0]
@@ -1123,7 +1123,7 @@ class TDTestCase:
check tag count add, stb and tb duplicate
* tag: alter table ...
* col: when update==0 and ts is same, unchange
- * so this case tag&&value will be added,
+ * so this case tag&&value will be added,
* col is added without value when update==0
* col is added with value when update==1
"""
@@ -1139,14 +1139,14 @@ class TDTestCase:
if db_update_tag == 1 :
self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
- tdSql.checkData(0, 11, None)
- tdSql.checkData(0, 12, None)
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
else:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
- tdSql.checkData(0, 1, True)
- tdSql.checkData(0, 11, None)
- tdSql.checkData(0, 12, None)
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
self.createDb()
def tagAddCheckCase(self, value_type="obj"):
@@ -1216,7 +1216,7 @@ class TDTestCase:
tag_value["t2"] = tdCom.getLongName(1, "letters")
tag_value.pop('id')
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
-
+
tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(2)
if value_type == "obj":
@@ -1313,7 +1313,7 @@ class TDTestCase:
tdSql.checkRows(6)
tdSql.query('select * from st123456')
tdSql.checkRows(5)
-
+
def multiInsertCheckCase(self, count, value_type="obj"):
"""
test multi insert
@@ -1356,7 +1356,7 @@ class TDTestCase:
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def blankColInsertCheckCase(self, value_type="obj"):
"""
test blank col insert
@@ -1382,7 +1382,7 @@ class TDTestCase:
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def chineseCheckCase(self):
"""
check nchar ---> chinese
@@ -1419,7 +1419,7 @@ class TDTestCase:
{"metric": f'{stb_name}_8', "timestamp": {"value": 1626006833641, "type": "mS"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}},
{"metric": f'{stb_name}_9', "timestamp": {"value": 1626006833642, "type": "MS"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}},
{"metric": f'{stb_name}_10', "timestamp": {"value": 1626006834, "type": "S"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}}]
-
+
for input_sql in input_json_list:
stb_name = input_sql["metric"]
self.resCmp(input_sql, stb_name)
@@ -1514,7 +1514,7 @@ class TDTestCase:
self.multiThreadRun(self.genMultiThreadSeq(input_json))
tdSql.query(f"show tables;")
tdSql.checkRows(5)
-
+
def sStbStbDdataInsertMultiThreadCheckCase(self, value_type="obj"):
"""
thread input same stb tb, different data, result keep first data
@@ -1550,7 +1550,7 @@ class TDTestCase:
tdSql.checkEqual(tb_name, expected_tb_name)
tdSql.query(f"select * from {stb_name};")
tdSql.checkRows(1)
-
+
def sStbStbDdataMtInsertMultiThreadCheckCase(self, value_type="obj"):
"""
thread input same stb tb, different data, minus columes and tags, result keep first data
@@ -1595,7 +1595,7 @@ class TDTestCase:
({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "plgkckpv", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "cujyqvlj", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "twjxisat", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz')]
-
+
self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list))
tdSql.query(f"show tables;")
tdSql.checkRows(2)
@@ -1642,10 +1642,10 @@ class TDTestCase:
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
- s_stb_s_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'tuzsfrom', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
- ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'llqzvgvw', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'nttjdzgi', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
- ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'tclbosqc', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'uatpzgpi', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
- ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rlpuzodt', 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'cwnpdnng', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ s_stb_s_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'tuzsfrom', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'llqzvgvw', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'nttjdzgi', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'tclbosqc', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'uatpzgpi', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rlpuzodt', 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'cwnpdnng', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rhnikvfq', 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'afcibyeb', 'type': 'binary'}, 'id': tb_name}}, 'punftb')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list))
tdSql.query(f"show tables;")
@@ -1664,10 +1664,10 @@ class TDTestCase:
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
- s_stb_s_tb_d_ts_a_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'tuzsfrom', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
- ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'llqzvgvw', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'nttjdzgi', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
- ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'tclbosqc', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'uatpzgpi', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
- ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rlpuzodt', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'cwnpdnng', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ s_stb_s_tb_d_ts_a_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'tuzsfrom', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'llqzvgvw', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'nttjdzgi', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'tclbosqc', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'uatpzgpi', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rlpuzodt', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'cwnpdnng', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rhnikvfq', 'type': 'binary'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'afcibyeb', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list))
tdSql.query(f"show tables;")
@@ -1699,10 +1699,10 @@ class TDTestCase:
tdCom.cleanTb()
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
- s_stb_d_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
- ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'llqzvgvw', 'type': 'binary'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
- ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'tclbosqc', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
- ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rlpuzodt', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ s_stb_d_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'llqzvgvw', 'type': 'binary'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'tclbosqc', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rlpuzodt', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rhnikvfq', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list))
tdSql.query(f"show tables;")
@@ -1713,7 +1713,7 @@ class TDTestCase:
input_json = f'test_nchar 0 L"涛思数据" t0=f,t1=L"涛思数据",t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64'
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
# input_json, stb_name = self.genFullTypeJson()
- # self.resCmp(input_json, stb_name)
+ # self.resCmp(input_json, stb_name)
except SchemalessError as err:
print(err.errno)
diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
index 649f0101af..3c47a65746 100644
--- a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
+++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
@@ -25,19 +25,19 @@ import threading
import platform
import io
if platform.system().lower() == 'windows':
- sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
+ sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
- self._conn = conn
+ self._conn = conn
self.smlChildTableName_value = "id"
def createDb(self, name="test", db_update_tag=0, protocol=None):
if protocol == "telnet-tcp":
name = "opentsdb_telnet"
-
+
if db_update_tag == 0:
tdSql.execute(f"drop database if exists {name}")
tdSql.execute(f"create database if not exists {name} precision 'us' schemaless 1")
@@ -66,7 +66,7 @@ class TDTestCase:
td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts))
return td_ts
#return repr(datetime.datetime.strptime(td_ts, "%Y-%m-%d %H:%M:%S.%f"))
-
+
def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
@@ -191,7 +191,7 @@ class TDTestCase:
tb_name = ""
td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1])
td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0])
-
+
col_name_list.append('_value')
col_value_list.append(stb_col_value)
@@ -218,7 +218,7 @@ class TDTestCase:
t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"",
t8="L\"ncharTagValue\"", ts="1626006833641",
id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None,
- t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
+ t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
chinese_tag=None, multi_field_tag=None, point_trans_tag=None, protocol=None, tcp_keyword_tag=None):
if stb_name == "":
stb_name = tdCom.getLongName(len=6, mode="letters")
@@ -268,7 +268,7 @@ class TDTestCase:
if protocol == "telnet-tcp":
sql_seq = 'put ' + sql_seq + '\n'
return sql_seq, stb_name
-
+
def genMulTagColStr(self, genType, count=1):
"""
genType must be tag/col
@@ -365,10 +365,10 @@ class TDTestCase:
for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
-
+
def symbolsCheckCase(self, protocol=None):
"""
- check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
+ check symbols = `~!@#$%^&*()_-+={[}]\|:;'\",<.>/?
"""
'''
please test :
@@ -424,7 +424,7 @@ class TDTestCase:
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def idSeqCheckCase(self, protocol=None):
"""
check id.index in tags
@@ -434,7 +434,7 @@ class TDTestCase:
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
-
+
def idLetterCheckCase(self, protocol=None):
"""
check id param
@@ -527,7 +527,7 @@ class TDTestCase:
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def illegalTsCheckCase(self):
"""
check ts format like 16260068336390us19
@@ -592,7 +592,7 @@ class TDTestCase:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
+ tdSql.checkNotEqual(err.errno, 0)
def tagValueLengthCheckCase(self):
"""
@@ -673,7 +673,7 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- # f32
+ # f32
tdCom.cleanTb()
for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
@@ -703,12 +703,12 @@ class TDTestCase:
# except SchemalessError as err:
# tdSql.checkNotEqual(err.errno, 0)
- # # # binary
+ # # # binary
# tdCom.cleanTb()
# stb_name = tdCom.getLongName(7, "letters")
# input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t'
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
-
+
# tdCom.cleanTb()
# input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t'
# try:
@@ -748,12 +748,12 @@ class TDTestCase:
# i8 i16 i32 i64 f32 f64
for input_sql in [
- self.genFullTypeSql(value="1s2i8")[0],
+ self.genFullTypeSql(value="1s2i8")[0],
self.genFullTypeSql(value="1s2i16")[0],
self.genFullTypeSql(value="1s2i32")[0],
self.genFullTypeSql(value="1s2i64")[0],
self.genFullTypeSql(value="11.1s45f32")[0],
- self.genFullTypeSql(value="11.1s45f64")[0],
+ self.genFullTypeSql(value="11.1s45f64")[0],
]:
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -761,14 +761,14 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- # check accepted binary and nchar symbols
+ # check accepted binary and nchar symbols
# # * ~!@#$¥%^&*()-+={}|[]、「」:;
for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc{symbol}aaa" t0=t'
input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=t t1="abc{symbol}aaa"'
self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.TELNET.value, None)
# self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.TELNET.value, None)
-
+
def blankCheckCase(self):
'''
check blank case
@@ -853,7 +853,7 @@ class TDTestCase:
check tag count add, stb and tb duplicate
* tag: alter table ...
* col: when update==0 and ts is same, unchange
- * so this case tag&&value will be added,
+ * so this case tag&&value will be added,
* col is added without value when update==0
* col is added with value when update==1
"""
@@ -869,14 +869,14 @@ class TDTestCase:
if db_update_tag == 1 :
self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
- tdSql.checkData(0, 11, None)
- tdSql.checkData(0, 12, None)
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
else:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
- tdSql.checkData(0, 1, True)
- tdSql.checkData(0, 11, None)
- tdSql.checkData(0, 12, None)
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
self.createDb()
@tdCom.smlPass
@@ -952,7 +952,7 @@ class TDTestCase:
tdCom.cleanTb()
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
-
+
lines = ["st123456 1626006833640 1i64 t1=3i64 t2=4f64 t3=\"t3\"",
"st123456 1626006833641 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
f'{stb_name} 1626006833642 3i64 t2=5f64 t3=L\"ste\"',
@@ -970,7 +970,7 @@ class TDTestCase:
tdSql.checkRows(6)
tdSql.query('select * from st123456')
tdSql.checkRows(5)
-
+
def multiInsertCheckCase(self, count):
"""
test multi insert
@@ -1014,7 +1014,7 @@ class TDTestCase:
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def blankColInsertCheckCase(self):
"""
test blank col insert
@@ -1040,7 +1040,7 @@ class TDTestCase:
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
-
+
def chineseCheckCase(self):
"""
check nchar ---> chinese
@@ -1210,7 +1210,7 @@ class TDTestCase:
self.multiThreadRun(self.genMultiThreadSeq(input_sql))
tdSql.query(f"show tables;")
tdSql.checkRows(5)
-
+
def sStbStbDdataInsertMultiThreadCheckCase(self):
"""
thread input same stb tb, different data, result keep first data
@@ -1248,7 +1248,7 @@ class TDTestCase:
tdSql.checkEqual(tb_name, expected_tb_name)
tdSql.query(f"select * from {stb_name};")
tdSql.checkRows(1) if self.smlChildTableName_value == "ID" else tdSql.checkRows(6)
-
+
def sStbStbDdataMtInsertMultiThreadCheckCase(self):
"""
thread input same stb tb, different data, minus columes and tags, result keep first data
@@ -1466,7 +1466,7 @@ class TDTestCase:
def run(self):
print("running {}".format(__file__))
-
+
try:
self.createDb()
self.runAll()
diff --git a/tests/system-test/1-insert/table_comment.py b/tests/system-test/1-insert/table_comment.py
index 5b85a3964f..465179855e 100644
--- a/tests/system-test/1-insert/table_comment.py
+++ b/tests/system-test/1-insert/table_comment.py
@@ -42,7 +42,7 @@ class TDTestCase:
self.comment_flag_list = [True,False]
def __set_and_alter_comment(self,tb_type='',comment_flag= False):
-
+
column_sql = ''
tag_sql = ''
for k,v in self.column_dict.items():
@@ -78,7 +78,7 @@ class TDTestCase:
tdSql.execute(f'create {operation} {self.stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]}) comment "{comment_info}"')
self.check_comment_info(comment_info,'stable')
self.alter_comment(self.stbname,'stable')
- tdSql.execute(f'drop table {self.stbname}')
+ tdSql.execute(f'drop table {self.stbname}')
elif tb_type == 'child_table':
tdSql.execute(f'create table if not exists {self.stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})')
if comment_flag == False:
@@ -122,7 +122,7 @@ class TDTestCase:
for flag in comment_flag:
self.__set_and_alter_comment(tb,flag)
tdSql.execute('drop database db')
-
+
def run(self):
self.comment_check_case(self.table_type_list,self.comment_flag_list)
@@ -131,4 +131,4 @@ class TDTestCase:
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/table_param_ttl.py b/tests/system-test/1-insert/table_param_ttl.py
index 49d6476d9c..5ef6548789 100644
--- a/tests/system-test/1-insert/table_param_ttl.py
+++ b/tests/system-test/1-insert/table_param_ttl.py
@@ -20,7 +20,7 @@ class TDTestCase:
updatecfgDict = {'ttlUnit':5,'ttlPushInterval':3}
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor())
self.ntbname = 'ntb'
self.stbname = 'stb'
self.tbnum = 10
diff --git a/tests/system-test/1-insert/test_stmt_muti_insert_query.py b/tests/system-test/1-insert/test_stmt_muti_insert_query.py
index 9fb802b96b..7ddc0e60bd 100644
--- a/tests/system-test/1-insert/test_stmt_muti_insert_query.py
+++ b/tests/system-test/1-insert/test_stmt_muti_insert_query.py
@@ -38,7 +38,7 @@ class TDTestCase:
case1: limit offset base function test
case2: offset return valid
'''
- return
+ return
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -64,7 +64,7 @@ class TDTestCase:
# self.create_tables();
self.ts = 1500000000000
- # stop
+ # stop
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
@@ -76,7 +76,7 @@ class TDTestCase:
def newcon(self,host,cfg):
user = "root"
password = "taosdata"
- port =6030
+ port =6030
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
print(con)
return con
@@ -126,14 +126,14 @@ class TDTestCase:
end = datetime.now()
print("elapsed time: ", end - start)
assert stmt.affected_rows == 3
-
+
#query 1
querystmt=conn.statement("select ?,bu from stb1")
queryparam=new_bind_params(1)
print(type(queryparam))
queryparam[0].binary("ts")
querystmt.bind_param(queryparam)
- querystmt.execute()
+ querystmt.execute()
result=querystmt.use_result()
# rows=result.fetch_all()
# print( querystmt.use_result())
@@ -152,7 +152,7 @@ class TDTestCase:
print(type(queryparam1))
queryparam1[0].int(4)
querystmt1.bind_param(queryparam1)
- querystmt1.execute()
+ querystmt1.execute()
result1=querystmt1.use_result()
rows1=result1.fetch_all()
print(rows1)
@@ -176,10 +176,10 @@ class TDTestCase:
host="localhost"
connectstmt=self.newcon(host,config)
self.test_stmt_insert_multi(connectstmt)
- return
+ return
# add case with filename
#
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/test_stmt_set_tbname_tag.py b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py
index a329b475db..321dc88cd7 100644
--- a/tests/system-test/1-insert/test_stmt_set_tbname_tag.py
+++ b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py
@@ -38,7 +38,7 @@ class TDTestCase:
case1: limit offset base function test
case2: offset return valid
'''
- return
+ return
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -64,7 +64,7 @@ class TDTestCase:
# self.create_tables();
self.ts = 1500000000000
- # stop
+ # stop
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
@@ -76,7 +76,7 @@ class TDTestCase:
def newcon(self,host,cfg):
user = "root"
password = "taosdata"
- port =6030
+ port =6030
con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
tdLog.debug(con)
return con
@@ -84,7 +84,7 @@ class TDTestCase:
def stmtExe(self,conn,sql,bindStat):
queryStat=conn.statement("%s"%sql)
queryStat.bind_param(bindStat)
- queryStat.execute()
+ queryStat.execute()
result=queryStat.use_result()
rows=result.fetch_all()
return rows
@@ -101,7 +101,7 @@ class TDTestCase:
ff float, dd double, bb binary(100), nn nchar(100), tt timestamp , vc varchar(100)) tags (t1 timestamp, t2 bool,\
t3 tinyint, t4 tinyint, t5 smallint, t6 int, t7 bigint, t8 tinyint unsigned, t9 smallint unsigned, \
t10 int unsigned, t11 bigint unsigned, t12 float, t13 double, t14 binary(100), t15 nchar(100), t16 timestamp)"%stablename)
-
+
stmt = conn.statement("insert into ? using log tags (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) \
values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
tags = new_bind_params(16)
@@ -140,13 +140,13 @@ class TDTestCase:
params[14].nchar(["涛思数据", None, "a long string with 中文?字符"])
params[15].timestamp([None, None, 1626861392591])
params[16].binary(["涛思数据16", None, None])
-
+
stmt.bind_param_batch(params)
stmt.execute()
assert stmt.affected_rows == 3
- #query all
+ #query all
queryparam=new_bind_params(1)
queryparam[0].int(10)
rows=self.stmtExe(conn,"select * from log where bu < ?",queryparam)
@@ -189,7 +189,7 @@ class TDTestCase:
#query: conversion Functions
queryparam=new_bind_params(1)
- queryparam[0].binary('1232a')
+ queryparam[0].binary('1232a')
rows=self.stmtExe(conn,"select cast( ? as bigint) from log",queryparam)
tdLog.debug("assert 5th case %s"%rows)
assert rows[0][0] == 1232, '5th.1 case is failed'
@@ -210,7 +210,7 @@ class TDTestCase:
tdLog.debug("assert 7th case %s"%rows)
assert rows[0][0] == 1, '7th case is failed'
assert rows[1][0] == 1, '7th case is failed'
-
+
#query: aggregate Functions
queryparam=new_bind_params(1)
queryparam[0].int(123)
@@ -238,7 +238,7 @@ class TDTestCase:
# conn.execute("drop database if exists %s" % dbname)
conn.close()
-
+
except Exception as err:
# conn.execute("drop database if exists %s" % dbname)
conn.close()
@@ -251,10 +251,10 @@ class TDTestCase:
connectstmt=self.newcon(host,config)
self.test_stmt_set_tbname_tag(connectstmt)
- return
+ return
# add case with filename
#
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py
index 29d2a91d28..deff4b42a1 100644
--- a/tests/system-test/1-insert/update_data.py
+++ b/tests/system-test/1-insert/update_data.py
@@ -47,7 +47,7 @@ class TDTestCase:
'col13': f'nchar({self.str_length})',
'col_ts' : 'timestamp'
}
-
+
def data_check(self,tbname,col_name,col_type,value):
tdSql.query(f'select {col_name} from {tbname}')
if col_type.lower() == 'float' or col_type.lower() == 'double':
@@ -121,9 +121,9 @@ class TDTestCase:
tdSql.error(f'insert into {stbname} values({self.ts},{error_value})')
elif col_type.lower() == 'tinyint unsigned':
for error_value in [constant.TINYINT_UN_MIN-1,constant.TINYINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
- tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
+ tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
if tb_type == 'ctb':
- tdSql.error(f'insert into {stbname} values({self.ts},{error_value})')
+ tdSql.error(f'insert into {stbname} values({self.ts},{error_value})')
elif col_type.lower() == 'smallint unsigned':
for error_value in [constant.SMALLINT_UN_MIN-1,constant.SMALLINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
@@ -136,9 +136,9 @@ class TDTestCase:
tdSql.error(f'insert into {stbname} values({self.ts},{error_value})')
elif col_type.lower() == 'bigint unsigned':
for error_value in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
- tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
+ tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
if tb_type == 'ctb':
- tdSql.error(f'insert into {stbname} values({self.ts},{error_value})')
+ tdSql.error(f'insert into {stbname} values({self.ts},{error_value})')
tdSql.execute(f'drop table {tbname}')
if tb_type == 'ctb':
tdSql.execute(f'drop table {stbname}')
@@ -182,9 +182,9 @@ class TDTestCase:
elif col_type.lower() == 'bigint unsigned':
self.update_and_check_data(tbname,col_name,col_type,up_unbigint,dbname)
elif col_type.lower() == 'bool':
- self.update_and_check_data(tbname,col_name,col_type,up_bool,dbname)
+ self.update_and_check_data(tbname,col_name,col_type,up_bool,dbname)
elif col_type.lower() == 'float':
- self.update_and_check_data(tbname,col_name,col_type,up_float,dbname)
+ self.update_and_check_data(tbname,col_name,col_type,up_float,dbname)
elif col_type.lower() == 'double':
self.update_and_check_data(tbname,col_name,col_type,up_double,dbname)
elif 'binary' in col_type.lower():
@@ -248,10 +248,10 @@ class TDTestCase:
self.update_check()
self.update_check_error()
# i+=1
-
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/update_data_muti_rows.py b/tests/system-test/1-insert/update_data_muti_rows.py
index e7da35426a..623dc497ea 100644
--- a/tests/system-test/1-insert/update_data_muti_rows.py
+++ b/tests/system-test/1-insert/update_data_muti_rows.py
@@ -87,7 +87,7 @@ class TDTestCase:
sql += f'({self.ts+i},{values})'
sql += ' '
tdSql.execute(sql)
-
+
def insert_data(self,col_type,tbname,rows,data):
for i in range(rows):
if col_type.lower() == 'tinyint':
@@ -107,16 +107,16 @@ class TDTestCase:
elif col_type.lower() == 'bigint unsigned':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["bigint unsigned"]})')
elif col_type.lower() == 'bool':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["bool"]})')
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["bool"]})')
elif col_type.lower() == 'float':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["float"]})')
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["float"]})')
elif col_type.lower() == 'double':
tdSql.execute(f'insert into {tbname} values({self.ts+i},{data["double"]})')
elif 'binary' in col_type.lower():
tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{data['binary']}")''')
elif 'nchar' in col_type.lower():
- tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{data['nchar']}")''')
-
+ tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{data['nchar']}")''')
+
def data_check(self,dbname,tbname,tbnum,rownum,data,col_name,col_type):
if 'binary' in col_type.lower():
self.update_data(dbname,f'{tbname}',tbnum,rownum,data['binary'],col_type)
@@ -170,10 +170,10 @@ class TDTestCase:
self.update_data_ntb()
self.update_data_ctb()
-
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/distribute_agg_apercentile.py b/tests/system-test/2-query/distribute_agg_apercentile.py
index 1fd853f9eb..ad754ad805 100644
--- a/tests/system-test/2-query/distribute_agg_apercentile.py
+++ b/tests/system-test/2-query/distribute_agg_apercentile.py
@@ -141,8 +141,8 @@ class TDTestCase:
query_data = tdSql.queryResult
# nest query for support max
- tdSql.query(f"select apercentile(c2+2,10)+1 from (select max(c1) c2 from {dbname}.stb1)")
- tdSql.checkData(0,0,31.000000000)
+ #tdSql.query(f"select apercentile(c2+2,10)+1 from (select max(c1) c2 from {dbname}.stb1)")
+ #tdSql.checkData(0,0,31.000000000)
tdSql.query(f"select apercentile(c1+2,10)+1 as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)")
tdSql.checkData(0,0,7.560701700)
tdSql.query(f"select apercentile(a+2,10)+1 as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)")
diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py
index 99e87e6cd6..2f463e59a0 100644
--- a/tests/system-test/2-query/function_diff.py
+++ b/tests/system-test/2-query/function_diff.py
@@ -280,7 +280,7 @@ class TDTestCase:
tdSql.error(self.diff_query_form(alias=", min(c1)")) # mix with select function 1
tdSql.error(self.diff_query_form(alias=", top(c1, 5)")) # mix with select function 2
tdSql.error(self.diff_query_form(alias=", spread(c1)")) # mix with calculation function 1
- tdSql.error(self.diff_query_form(alias=", diff(c1)")) # mix with calculation function 2
+ tdSql.query(self.diff_query_form(alias=", diff(c1)")) # mix with calculation function 2
# tdSql.error(self.diff_query_form(alias=" + 2")) # mix with arithmetic 1
tdSql.error(self.diff_query_form(alias=" + avg(c1)")) # mix with arithmetic 2
tdSql.query(self.diff_query_form(alias=", c2")) # mix with other 1
diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py
index 09a046d6ef..856006aaf1 100644
--- a/tests/system-test/2-query/irate.py
+++ b/tests/system-test/2-query/irate.py
@@ -213,7 +213,7 @@ class TDTestCase:
tdSql.error("select irate(c1), abs(c1) from ct4 ")
# agg functions mix with agg functions
- tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname ")
+ tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname order by tbname")
tdSql.checkData(0, 0, 0.000000000)
tdSql.checkData(1, 0, 0.000000000)
tdSql.checkData(0, 1, 13)
diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py
new file mode 100644
index 0000000000..6cfb9a1dad
--- /dev/null
+++ b/tests/system-test/2-query/sml.py
@@ -0,0 +1,100 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.common import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor())
+ #tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+ def checkFileContent(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/sml_test'%(buildPath)
+ tdLog.info(cmdStr)
+ ret = os.system(cmdStr)
+ if ret != 0:
+ tdLog.exit("sml_test failed")
+
+ tdSql.execute('use sml_db')
+ tdSql.query("select * from t_b7d815c9222ca64cdf2614c61de8f211")
+ tdSql.checkRows(1)
+
+ tdSql.checkData(0, 0, '2016-01-01 08:00:07.000')
+ tdSql.checkData(0, 1, 2000)
+ tdSql.checkData(0, 2, 200)
+ tdSql.checkData(0, 3, 15)
+ tdSql.checkData(0, 4, 24.5208)
+ tdSql.checkData(0, 5, 28.09377)
+ tdSql.checkData(0, 6, 428)
+ tdSql.checkData(0, 7, 0)
+ tdSql.checkData(0, 8, 304)
+ tdSql.checkData(0, 9, 0)
+ tdSql.checkData(0, 10, 25)
+
+ tdSql.query("select * from readings")
+ tdSql.checkRows(9)
+
+ tdSql.query("select distinct tbname from readings")
+ tdSql.checkRows(4)
+
+ tdSql.query("select * from t_0799064f5487946e5d22164a822acfc8 order by _ts")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 3, "kk")
+ tdSql.checkData(1, 3, None)
+
+
+ tdSql.query("select distinct tbname from `sys.if.bytes.out`")
+ tdSql.checkRows(2)
+
+ tdSql.query("select * from t_fc70dec6677d4277c5d9799c4da806da order by _ts")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1.300000000)
+ tdSql.checkData(1, 1,13.000000000)
+
+ tdSql.query("select * from `sys.procs.running`")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, 42.000000000)
+ tdSql.checkData(0, 2, "web01")
+
+ tdSql.query("select distinct tbname from `sys.cpu.nice`")
+ tdSql.checkRows(2)
+
+ tdSql.query("select * from `sys.cpu.nice` order by _ts")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 9.000000000)
+ tdSql.checkData(0, 2, "lga")
+ tdSql.checkData(0, 3, "web02")
+ tdSql.checkData(0, 4, None)
+ tdSql.checkData(1, 1, 18.000000000)
+ tdSql.checkData(1, 2, "lga")
+ tdSql.checkData(1, 3, "web01")
+ tdSql.checkData(1, 4, "t1")
+
+ tdSql.query("select * from macylr")
+ tdSql.checkRows(2)
+ return
+
+ def run(self):
+ tdSql.prepare()
+ self.checkFileContent()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py
index 3e6e14be38..ccf7e287e2 100644
--- a/tests/system-test/2-query/unique.py
+++ b/tests/system-test/2-query/unique.py
@@ -457,15 +457,15 @@ class TDTestCase:
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
diff --git a/tests/system-test/6-cluster/5dnode1mnode.py b/tests/system-test/6-cluster/5dnode1mnode.py
index ee2d8afb81..6a8f5a2efb 100644
--- a/tests/system-test/6-cluster/5dnode1mnode.py
+++ b/tests/system-test/6-cluster/5dnode1mnode.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -18,7 +18,7 @@ class MyDnodes(TDDnodes):
super(MyDnodes,self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
-
+
class TDTestCase:
noConn = True
def init(self,conn ,logSql):
@@ -29,7 +29,7 @@ class TDTestCase:
self.host=self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -46,12 +46,12 @@ class TDTestCase:
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
-
- def depoly_cluster(self ,dnodes_nums):
+
+ def depoly_cluster(self ,dnodes_nums):
testCluster = False
- valgrind = 0
+ valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
@@ -63,7 +63,7 @@ class TDTestCase:
dnode.addExtraCfg("monitorFqdn", hostname)
dnode.addExtraCfg("monitorPort", 7043)
dnodes.append(dnode)
-
+
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
@@ -71,11 +71,11 @@ class TDTestCase:
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
-
+
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
- # create cluster
+ # create cluster
for dnode in self.TDDnodes.dnodes[1:]:
# print(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
@@ -84,7 +84,7 @@ class TDTestCase:
cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\""
print(cmd)
os.system(cmd)
-
+
time.sleep(2)
tdLog.info(" create cluster done! ")
@@ -94,7 +94,7 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
- tdSql.query("show mnodes;")
+ tdSql.query("show mnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -120,7 +120,7 @@ class TDTestCase:
)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
-
+
tdSql.query('show databases;')
tdSql.checkData(2,5,'off')
tdSql.error("alter database db strict 'off'")
@@ -135,7 +135,7 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.five_dnode_one_mnode()
@@ -145,4 +145,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode2mnode.py b/tests/system-test/6-cluster/5dnode2mnode.py
index e4df9df4ea..59d4f5f18f 100644
--- a/tests/system-test/6-cluster/5dnode2mnode.py
+++ b/tests/system-test/6-cluster/5dnode2mnode.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -17,8 +17,8 @@ import subprocess
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import *
-
+from clusterCommonCheck import *
+
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
@@ -48,7 +48,7 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
- tdSql.query("show mnodes;")
+ tdSql.query("show mnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -63,7 +63,7 @@ class TDTestCase:
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
- tdSql.checkRows(2)
+ tdSql.checkRows(2)
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
print("two mnodes is ready")
@@ -73,7 +73,7 @@ class TDTestCase:
print("two mnodes is not ready in 10s ")
# fisrt check statut ready
-
+
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -106,7 +106,7 @@ class TDTestCase:
clusterComCheck.checkDnodes(5)
# restart all taosd
tdDnodes=cluster.dnodes
-
+
# stop follower
tdLog.info("stop follower")
tdDnodes[1].stoptaosd()
@@ -118,7 +118,7 @@ class TDTestCase:
tdDnodes[1].starttaosd()
if clusterComCheck.checkMnodeStatus(2) :
print("both mnodes are ready")
-
+
# stop leader
tdLog.info("stop leader")
tdDnodes[0].stoptaosd()
@@ -133,7 +133,7 @@ class TDTestCase:
if clusterComCheck.checkMnodeStatus(2) :
print("both mnodes are ready")
- def run(self):
+ def run(self):
self.five_dnode_two_mnode()
@@ -142,4 +142,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py b/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py
index 7b86ee0067..af78dfae9d 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py
@@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -68,7 +68,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -118,7 +118,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -133,7 +133,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -142,7 +142,7 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
# dnode6=cluster.addDnode(6)
@@ -166,7 +166,7 @@ class TDTestCase:
newTdSql=tdCom.newTdSql()
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
for tr in threads:
- tr.start()
+ tr.start()
dnode6Port=int(6030+5*100)
tdSql.execute("create dnode '%s:%d'"%(hostname,dnode6Port))
clusterComCheck.checkDnodes(dnodeNumbers)
@@ -179,7 +179,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -191,20 +191,20 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
-
+
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(dbNumbers)
@@ -217,7 +217,7 @@ class TDTestCase:
stableName= '%s_%d'%(paraDict['stbName'],i)
tdSql.query("select * from %s"%stableName)
tdSql.checkRows(rowsPerStb)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
@@ -226,4 +226,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeDrop.py b/tests/system-test/6-cluster/5dnode3mnodeDrop.py
index e81d5295f2..32f222dacb 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeDrop.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeDrop.py
@@ -3,7 +3,7 @@ from paramiko import HostKeys
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -19,7 +19,7 @@ class MyDnodes(TDDnodes):
super(MyDnodes,self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
-
+
class TDTestCase:
def init(self,conn ,logSql):
@@ -48,7 +48,7 @@ class TDTestCase:
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
-
+
def insert_data(self,count):
# fisrt add data : db\stable\childtable\general table
for couti in count:
@@ -70,10 +70,10 @@ class TDTestCase:
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
- def depoly_cluster(self ,dnodes_nums):
+ def depoly_cluster(self ,dnodes_nums):
testCluster = False
- valgrind = 0
+ valgrind = 0
hostname = socket.gethostname()
tdLog.debug(hostname)
dnodes = []
@@ -88,7 +88,7 @@ class TDTestCase:
dnode.addExtraCfg("monitorPort", 7043)
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
dnodes.append(dnode)
-
+
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
@@ -96,11 +96,11 @@ class TDTestCase:
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
-
+
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
- # create cluster
+ # create cluster
for dnode in self.TDDnodes.dnodes[1:]:
# tdLog.debug(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
@@ -109,7 +109,7 @@ class TDTestCase:
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
tdLog.debug(cmd)
os.system(cmd)
-
+
time.sleep(2)
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
@@ -118,8 +118,8 @@ class TDTestCase:
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
- if tdSql.checkRows(3) :
- tdLog.debug("mnode is three nodes")
+ if tdSql.checkRows(3) :
+ tdLog.debug("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
@@ -129,20 +129,20 @@ class TDTestCase:
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
tdLog.debug("three mnodes is ready in 10s")
- break
+ break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
tdLog.debug("three mnodes is ready in 10s")
- break
+ break
count+=1
else:
tdLog.debug(tdSql.queryResult)
tdLog.debug("three mnodes is not ready in 10s ")
return -1
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
@@ -169,11 +169,11 @@ class TDTestCase:
count+=1
else:
tdLog.debug("stop mnodes on dnode 2 failed in 10s ")
- return -1
+ return -1
tdSql.error("drop mnode on dnode 1;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
@@ -200,8 +200,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 2;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -229,8 +229,8 @@ class TDTestCase:
tdLog.debug("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -249,8 +249,8 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
- tdSql.query("show mnodes;")
- tdSql.checkRows(1)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -270,8 +270,8 @@ class TDTestCase:
tdSql.query("show dnodes;")
tdLog.debug(tdSql.queryResult)
- # drop follower of mnode
- dropcount =0
+ # drop follower of mnode
+ dropcount =0
while dropcount <= 10:
for i in range(1,3):
tdLog.debug("drop mnode on dnode %d"%(i+1))
@@ -306,7 +306,7 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
- def run(self):
+ def run(self):
# tdLog.debug(self.master_dnode.cfgDict)
self.buildcluster(5)
self.five_dnode_three_mnode()
@@ -316,4 +316,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py b/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py
index cfa3920604..106bb26264 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -13,7 +13,7 @@ import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -37,7 +37,7 @@ class TDTestCase:
self.host=self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -63,7 +63,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -74,7 +74,7 @@ class TDTestCase:
def createDbTbale(self,dbcountStart,dbcountStop,stbname,chilCount):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(dbcountStart,dbcountStop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -98,7 +98,7 @@ class TDTestCase:
def insertTabaleData(self,dbcountStart,dbcountStop,stbname,chilCount,ts_start,rowCount):
# insert data : create childtable and data
-
+
for couti in range(dbcountStart,dbcountStop):
tdSql.execute("use db%d" %couti)
pre_insert = "insert into "
@@ -115,7 +115,7 @@ class TDTestCase:
# print(sql)
tdSql.execute(sql)
sql = "insert into %s_%d values " %(stbname,i)
- # end sql
+ # end sql
if sql != pre_insert:
# print(sql)
print(len(sql))
@@ -134,13 +134,13 @@ class TDTestCase:
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
- return
-
+ return
- def depoly_cluster(self ,dnodes_nums):
+
+ def depoly_cluster(self ,dnodes_nums):
testCluster = False
- valgrind = 0
+ valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
@@ -154,7 +154,7 @@ class TDTestCase:
dnode.addExtraCfg("monitorPort", 7043)
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
dnodes.append(dnode)
-
+
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
@@ -162,11 +162,11 @@ class TDTestCase:
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
-
+
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
- # create cluster
+ # create cluster
for dnode in self.TDDnodes.dnodes[1:]:
# print(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
@@ -175,7 +175,7 @@ class TDTestCase:
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
print(cmd)
os.system(cmd)
-
+
time.sleep(2)
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
@@ -185,8 +185,8 @@ class TDTestCase:
time.sleep(1)
statusReadyBumber=0
tdSql.query("show dnodes;")
- if tdSql.checkRows(dnodenumber) :
- print("dnode is %d nodes"%dnodenumber)
+ if tdSql.checkRows(dnodenumber) :
+ print("dnode is %d nodes"%dnodenumber)
for i in range(dnodenumber):
if tdSql.queryResult[i][4] !='ready' :
status=tdSql.queryResult[i][4]
@@ -203,15 +203,15 @@ class TDTestCase:
else:
print("%d mnodes is not ready in 10s "%dnodenumber)
return False
-
+
def check3mnode(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
- if tdSql.checkRows(3) :
- print("mnode is three nodes")
+ if tdSql.checkRows(3) :
+ print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
@@ -221,19 +221,19 @@ class TDTestCase:
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
print("three mnodes is ready in 10s")
- break
+ break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
print("three mnodes is ready in 10s")
- break
+ break
count+=1
else:
print("three mnodes is not ready in 10s ")
return -1
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
@@ -263,8 +263,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 1;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
@@ -291,8 +291,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 2;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -320,8 +320,8 @@ class TDTestCase:
print("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -348,8 +348,8 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
- tdSql.query("show mnodes;")
- tdSql.checkRows(1)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -364,7 +364,7 @@ class TDTestCase:
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
- tdLog.debug("stop all of mnode ")
+ tdLog.debug("stop all of mnode ")
# drop follower of mnode and insert data
self.createDbTbale(dbcountStart, dbcountStop,stbname,tablesPerStb)
@@ -378,7 +378,7 @@ class TDTestCase:
rowsPerTable))
threads.start()
- dropcount =0
+ dropcount =0
while dropcount <= 10:
for i in range(1,3):
tdLog.debug("drop mnode on dnode %d"%(i+1))
@@ -415,7 +415,7 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.buildcluster(5)
self.five_dnode_three_mnode(5)
@@ -425,4 +425,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py
index 48ee90fad2..65d525cfd1 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py
@@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -68,7 +68,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -112,7 +112,7 @@ class TDTestCase:
}
username="user1"
passwd="123"
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
@@ -120,7 +120,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -135,7 +135,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -162,10 +162,10 @@ class TDTestCase:
for i in range(tdSql.queryRows):
if tdSql.queryResult[i][0] == "%s"%username :
tdLog.info("create user:%s successfully"%username)
-
+
# # create database and stable
# clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
- # tdLog.info("Take turns stopping Mnodes ")
+ # tdLog.info("Take turns stopping Mnodes ")
# tdDnodes=cluster.dnodes
# stopcount =0
@@ -197,7 +197,7 @@ class TDTestCase:
# tdDnodes[i].stoptaosd()
# # sleep(10)
# tdDnodes[i].starttaosd()
- # # sleep(10)
+ # # sleep(10)
# elif stopRole == "vnode":
# for i in range(vnodeNumbers):
# tdDnodes[i+mnodeNums].stoptaosd()
@@ -209,7 +209,7 @@ class TDTestCase:
# tdDnodes[i].stoptaosd()
# # sleep(10)
# tdDnodes[i].starttaosd()
- # # sleep(10)
+ # # sleep(10)
# # dnodeNumbers don't include database of schema
# if clusterComCheck.checkDnodes(dnodeNumbers):
@@ -220,7 +220,7 @@ class TDTestCase:
# tdLog.exit("one or more of dnodes failed to start ")
# # self.check3mnode()
# stopcount+=1
-
+
# clusterComCheck.checkDnodes(dnodeNumbers)
# clusterComCheck.checkDbRows(dbNumbers)
@@ -234,7 +234,7 @@ class TDTestCase:
# # tdSql.query("select * from %s"%stableName)
# # tdSql.checkRows(rowsPerStb)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
@@ -243,4 +243,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py
index 8ae09dce16..8a0c90966b 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py
@@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -68,7 +68,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -110,7 +110,7 @@ class TDTestCase:
"rowsPerTbl": 100,
"batchNum": 5000
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
@@ -118,7 +118,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -133,7 +133,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -142,7 +142,7 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
@@ -174,7 +174,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -186,7 +186,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@@ -197,7 +197,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(dbNumbers)
@@ -211,7 +211,7 @@ class TDTestCase:
# tdSql.query("select * from %s"%stableName)
# tdSql.checkRows(rowsPerStb)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
@@ -220,4 +220,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py
index 87d108cdeb..5f02efc7ce 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py
@@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -68,7 +68,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -110,7 +110,7 @@ class TDTestCase:
"rowsPerTbl": 100,
"batchNum": 5000
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
@@ -118,7 +118,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -133,7 +133,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -142,7 +142,7 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
@@ -173,7 +173,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -185,7 +185,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@@ -196,10 +196,10 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
-
+
clusterComCheck.checkDnodes(dnodeNumbers)
clusterComCheck.checkDbRows(dbNumbers)
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
@@ -212,7 +212,7 @@ class TDTestCase:
# tdSql.query("select * from %s"%stableName)
# tdSql.checkRows(rowsPerStb)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
@@ -221,4 +221,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py
index 3c0e479030..6debffdeb8 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -67,7 +67,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -107,13 +107,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -128,7 +128,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -149,7 +149,7 @@ class TDTestCase:
for tr in threads:
tr.start()
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@@ -157,7 +157,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -169,7 +169,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@@ -180,7 +180,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
tdLog.info("check dnode number:")
@@ -196,7 +196,7 @@ class TDTestCase:
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='dnode')
@@ -205,4 +205,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py
index f685ef2f1a..919c560330 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -81,13 +81,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
dbNumbers = 1
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -102,7 +102,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -111,7 +111,7 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
@@ -130,7 +130,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -142,19 +142,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
-
+
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
@@ -169,7 +169,7 @@ class TDTestCase:
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
@@ -178,4 +178,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py
index 8f99ef0b5c..562721581d 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py
@@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -68,7 +68,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -110,7 +110,7 @@ class TDTestCase:
"rowsPerTbl": 10000,
"batchNum": 5000
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
@@ -118,7 +118,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -133,7 +133,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -142,7 +142,7 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
@@ -171,7 +171,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -183,19 +183,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
-
+
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
@@ -209,7 +209,7 @@ class TDTestCase:
stableName= '%s_%d'%(paraDict['stbName'],i)
tdSql.query("select * from %s"%stableName)
tdSql.checkRows(rowsPerStb)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
@@ -218,4 +218,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py
index d39bae68f9..e9b032c003 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -82,13 +82,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -105,14 +105,14 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
- tdLog.info("create database and stable")
+ tdLog.info("create database and stable")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
@@ -124,7 +124,7 @@ class TDTestCase:
for tr in threads:
tr.start()
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@@ -132,7 +132,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -144,7 +144,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@@ -155,7 +155,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
tdLog.info("check dnode number:")
@@ -170,7 +170,7 @@ class TDTestCase:
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='mnode')
@@ -179,4 +179,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py
index 1fa77d3bfd..99efabd8ea 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -82,13 +82,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -103,14 +103,14 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
- tdLog.info("create database and stable")
+ tdLog.info("create database and stable")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
@@ -122,7 +122,7 @@ class TDTestCase:
for tr in threads:
tr.start()
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@@ -130,7 +130,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -142,7 +142,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@@ -153,14 +153,14 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
tdLog.info("check dnode number:")
clusterComCheck.checkDnodes(dnodeNumbers)
tdSql.query("show databases")
tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers-2))
-
+
# tdLog.info("check DB Rows:")
# clusterComCheck.checkDbRows(allDbNumbers)
# tdLog.info("check DB Status on by on")
@@ -168,7 +168,7 @@ class TDTestCase:
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='mnode')
@@ -177,4 +177,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py
index 3a10427664..d8c9b9e54d 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -67,7 +67,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -106,13 +106,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
dbNumbers = 1
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -127,7 +127,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -148,7 +148,7 @@ class TDTestCase:
for tr in threads:
tr.start()
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
@@ -157,7 +157,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -169,19 +169,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
-
+
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
@@ -195,7 +195,7 @@ class TDTestCase:
# tdSql.checkRows(allStbNumbers)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='mnode')
@@ -204,4 +204,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py
index da32d1b4a8..706c8ad9d5 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -82,13 +82,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -124,7 +124,7 @@ class TDTestCase:
for tr in threads:
tr.start()
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@@ -132,7 +132,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -144,7 +144,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@@ -155,7 +155,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
@@ -169,7 +169,7 @@ class TDTestCase:
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='vnode')
@@ -178,4 +178,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py
index 5c9e3587c4..c9f7cdacaf 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -31,7 +31,7 @@ class TDTestCase:
tdSql.init(conn.cursor())
self.host = socket.gethostname()
print(tdSql)
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -67,7 +67,7 @@ class TDTestCase:
def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -106,13 +106,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
dbNumbers = 1
-
+
print(tdSql)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
@@ -128,7 +128,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -137,12 +137,12 @@ class TDTestCase:
# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
-
+
for i in range(restartNumbers):
stableName= '%s%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()
@@ -151,7 +151,7 @@ class TDTestCase:
for tr in threads:
tr.start()
- tdLog.info("Take turns stopping Mnodes ")
+ tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@@ -159,7 +159,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -171,19 +171,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")
-
+
self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)
@@ -197,7 +197,7 @@ class TDTestCase:
tdSql.checkRows(allStbNumbers)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='vnode')
@@ -206,4 +206,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py b/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py
index aa1d7ecc29..bc1530bb8b 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -18,7 +18,7 @@ import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -54,7 +54,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -65,7 +65,7 @@ class TDTestCase:
def insert_data(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -96,7 +96,7 @@ class TDTestCase:
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
- return
+ return
def checkdnodes(self,dnodenumber):
count=0
@@ -104,8 +104,8 @@ class TDTestCase:
time.sleep(1)
statusReadyBumber=0
tdSql.query("show dnodes;")
- if tdSql.checkRows(dnodenumber) :
- print("dnode is %d nodes"%dnodenumber)
+ if tdSql.checkRows(dnodenumber) :
+ print("dnode is %d nodes"%dnodenumber)
for i in range(dnodenumber):
if tdSql.queryResult[i][4] !='ready' :
status=tdSql.queryResult[i][4]
@@ -122,15 +122,15 @@ class TDTestCase:
else:
print("%d mnodes is not ready in 10s "%dnodenumber)
return False
-
+
def check3mnode(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
- if tdSql.checkRows(3) :
- print("mnode is three nodes")
+ if tdSql.checkRows(3) :
+ print("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
@@ -140,19 +140,19 @@ class TDTestCase:
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
print("three mnodes is ready in 10s")
- break
+ break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
print("three mnodes is ready in 10s")
- break
+ break
count+=1
else:
print("three mnodes is not ready in 10s ")
return -1
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
@@ -182,8 +182,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 1;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
@@ -210,8 +210,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 2;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -239,8 +239,8 @@ class TDTestCase:
print("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -258,15 +258,15 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
-
+
def five_dnode_three_mnode(self,dnodenumber):
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
- tdSql.query("show mnodes;")
- tdSql.checkRows(1)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -281,15 +281,15 @@ class TDTestCase:
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
- tdLog.debug("stop all of mnode ")
+ tdLog.debug("stop all of mnode ")
# seperate vnode and mnode in different dnodes.
# create database and stable
- stopcount =0
+ stopcount =0
while stopcount < 2:
for i in range(dnodenumber):
# threads=[]
- # threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
+ # threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
threads=threading.Thread(target=self.insert_data, args=(i,i+1))
threads.start()
self.TDDnodes.stoptaosd(i+1)
@@ -306,13 +306,13 @@ class TDTestCase:
return False
# self.check3mnode()
self.check3mnode()
-
+
stopcount+=1
self.check3mnode()
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.five_dnode_three_mnode(5)
@@ -321,4 +321,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop.py b/tests/system-test/6-cluster/5dnode3mnodeStop.py
index 46e7771079..09974db884 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeStop.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeStop.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -15,13 +15,13 @@ from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import *
+from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
-
+
class TDTestCase:
def init(self,conn ,logSql):
@@ -69,7 +69,7 @@ class TDTestCase:
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = int(dnodenumbers * restartNumber)
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -84,7 +84,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -109,10 +109,10 @@ class TDTestCase:
clusterComCheck.checkMnodeStatus(3)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1)
-
+
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py b/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py
index fef26333b7..9211ed3af8 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -15,13 +15,13 @@ from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import *
+from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
-
+
class TDTestCase:
def init(self,conn ,logSql):
@@ -69,7 +69,7 @@ class TDTestCase:
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = 1
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -84,7 +84,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -99,7 +99,7 @@ class TDTestCase:
tdLog.info("check whether 2 mnode status is offline")
clusterComCheck.check3mnode2off()
# tdSql.error("create user user1 pass '123';")
-
+
tdLog.info("start two follower")
tdDnodes[1].starttaosd()
tdDnodes[2].starttaosd()
@@ -107,10 +107,10 @@ class TDTestCase:
clusterComCheck.checkMnodeStatus(mnodeNums)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1)
-
+
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py b/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py
index f1eb2a4587..0a8c94b080 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -16,13 +16,13 @@ from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import *
+from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
-
+
class TDTestCase:
def init(self,conn ,logSql):
@@ -69,7 +69,7 @@ class TDTestCase:
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = int(dnodenumbers * restartNumber)
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -84,7 +84,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -96,21 +96,21 @@ class TDTestCase:
# restart all taosd
- tdDnodes=cluster.dnodes
+ tdDnodes=cluster.dnodes
for i in range(mnodeNums):
tdDnodes[i].stoptaosd()
for j in range(dnodenumbers):
if j != i:
cluster.checkConnectStatus(j)
clusterComCheck.check3mnodeoff(i+1,3)
- clusterComCheck.init(cluster.checkConnectStatus(i+1))
+ clusterComCheck.init(cluster.checkConnectStatus(i+1))
tdDnodes[i].starttaosd()
clusterComCheck.checkMnodeStatus(mnodeNums)
-
- tdLog.info("Take turns stopping all dnodes ")
+
+ tdLog.info("Take turns stopping all dnodes ")
# seperate vnode and mnode in different dnodes.
# create database and stable
- stopcount =0
+ stopcount =0
while stopcount < restartNumber:
tdLog.info("first restart loop")
for i in range(dnodenumbers):
@@ -120,13 +120,13 @@ class TDTestCase:
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(mnodeNums)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(5,3,1)
-
+
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py b/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py
index 59a1a8f697..e5cf7c5254 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -15,13 +15,13 @@ from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import *
+from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
-
+
class TDTestCase:
def init(self,conn ,logSql):
@@ -69,7 +69,7 @@ class TDTestCase:
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = 1
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -84,7 +84,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -100,20 +100,20 @@ class TDTestCase:
# tdLog.info("check whether 2 mnode status is offline")
# clusterComCheck.check3mnode2off()
# tdSql.error("create user user1 pass '123';")
-
+
tdLog.info("start one mnode" )
tdDnodes[0].starttaosd()
clusterComCheck.check3mnodeoff(2)
-
+
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
clusterComCheck.checkDb(dbNumbers,1,'db0')
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1)
-
+
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py b/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py
index a53930faac..0d7e453042 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -13,7 +13,7 @@ import time
import socket
import subprocess
from multiprocessing import Process
-import threading
+import threading
import time
import inspect
import ctypes
@@ -36,7 +36,7 @@ class TDTestCase:
self.host=self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -62,7 +62,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
- # """if it returns a number greater than one, you're in trouble,
+ # """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
@@ -73,7 +73,7 @@ class TDTestCase:
def insert_data(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table
-
+
for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)
@@ -95,10 +95,10 @@ class TDTestCase:
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
- def depoly_cluster(self ,dnodes_nums):
+ def depoly_cluster(self ,dnodes_nums):
testCluster = False
- valgrind = 0
+ valgrind = 0
hostname = socket.gethostname()
dnodes = []
start_port = 6030
@@ -112,7 +112,7 @@ class TDTestCase:
dnode.addExtraCfg("monitorPort", 7043)
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
dnodes.append(dnode)
-
+
self.TDDnodes = MyDnodes(dnodes)
self.TDDnodes.init("")
self.TDDnodes.setTestCluster(testCluster)
@@ -120,11 +120,11 @@ class TDTestCase:
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
-
+
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
- # create cluster
+ # create cluster
for dnode in self.TDDnodes.dnodes[1:]:
# tdLog.debug(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
@@ -133,7 +133,7 @@ class TDTestCase:
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
tdLog.debug(cmd)
os.system(cmd)
-
+
time.sleep(2)
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
@@ -143,8 +143,8 @@ class TDTestCase:
time.sleep(1)
statusReadyBumber=0
tdSql.query("show dnodes;")
- if tdSql.checkRows(dnodenumber) :
- tdLog.debug("dnode is %d nodes"%dnodenumber)
+ if tdSql.checkRows(dnodenumber) :
+ tdLog.debug("dnode is %d nodes"%dnodenumber)
for i in range(dnodenumber):
if tdSql.queryResult[i][4] !='ready' :
status=tdSql.queryResult[i][4]
@@ -161,15 +161,15 @@ class TDTestCase:
else:
tdLog.debug("%d mnodes is not ready in 10s "%dnodenumber)
return False
-
+
def check3mnode(self):
count=0
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
- if tdSql.checkRows(3) :
- tdLog.debug("mnode is three nodes")
+ if tdSql.checkRows(3) :
+ tdLog.debug("mnode is three nodes")
if tdSql.queryResult[0][2]=='leader' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='follower':
@@ -179,19 +179,19 @@ class TDTestCase:
if tdSql.queryResult[1][2]=='leader':
if tdSql.queryResult[2][2]=='follower':
tdLog.debug("three mnodes is ready in 10s")
- break
+ break
elif tdSql.queryResult[0][2]=='follower' :
if tdSql.queryResult[1][2]=='follower':
if tdSql.queryResult[2][2]=='leader':
tdLog.debug("three mnodes is ready in 10s")
- break
+ break
count+=1
else:
tdLog.debug("three mnodes is not ready in 10s ")
return -1
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,3,'ready')
tdSql.checkData(1,1,'%s:6130'%self.host)
@@ -221,8 +221,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 1;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'offline')
tdSql.checkData(0,3,'ready')
@@ -249,8 +249,8 @@ class TDTestCase:
return -1
tdSql.error("drop mnode on dnode 2;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -278,8 +278,8 @@ class TDTestCase:
tdLog.debug("stop mnodes on dnode 3 failed in 10s")
return -1
tdSql.error("drop mnode on dnode 3;")
- tdSql.query("show mnodes;")
- tdSql.checkRows(3)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(3)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -296,8 +296,8 @@ class TDTestCase:
tdSql.checkData(4,1,'%s:6430'%self.host)
tdSql.checkData(0,4,'ready')
tdSql.checkData(4,4,'ready')
- tdSql.query("show mnodes;")
- tdSql.checkRows(1)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(1)
tdSql.checkData(0,1,'%s:6030'%self.host)
tdSql.checkData(0,2,'leader')
tdSql.checkData(0,3,'ready')
@@ -312,13 +312,13 @@ class TDTestCase:
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
tdLog.debug(tdSql.queryResult)
- tdLog.debug("stop all of mnode ")
+ tdLog.debug("stop all of mnode ")
- stopcount =0
+ stopcount =0
while stopcount <= 2:
for i in range(dnodenumber):
# threads=[]
- # threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
+ # threads = MyThreadFunc(self.insert_data(i*2,i*2+2))
threads=threading.Thread(target=self.insert_data, args=((stopcount+i)*2,(i+stopcount)*2+2))
threads.start()
self.TDDnodes.stoptaosd(i+1)
@@ -344,7 +344,7 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
- def run(self):
+ def run(self):
# tdLog.debug(self.master_dnode.cfgDict)
self.buildcluster(5)
self.five_dnode_three_mnode(5)
@@ -354,4 +354,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py b/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py
index e0c91e5ac4..c7c45a19c6 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -15,13 +15,13 @@ from test import tdDnodes
sys.path.append("./6-cluster")
from clusterCommonCreate import *
-from clusterCommonCheck import *
+from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process
-
+
class TDTestCase:
def init(self,conn ,logSql):
@@ -69,7 +69,7 @@ class TDTestCase:
dnodenumbers=int(dnodenumbers)
mnodeNums=int(mnodeNums)
dbNumbers = int(dnodenumbers * restartNumber)
-
+
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)
@@ -84,7 +84,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)
- # add some error operations and
+ # add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
@@ -93,10 +93,10 @@ class TDTestCase:
# restart all taosd
tdDnodes=cluster.dnodes
- tdLog.info("Take turns stopping all dnodes ")
+ tdLog.info("Take turns stopping all dnodes ")
# seperate vnode and mnode in different dnodes.
# create database and stable
- stopcount =0
+ stopcount =0
while stopcount <= 2:
tdLog.info(" restart loop: %d"%stopcount )
for i in range(dnodenumbers):
@@ -106,10 +106,10 @@ class TDTestCase:
clusterComCheck.checkDnodes(dnodenumbers)
clusterComCheck.checkMnodeStatus(3)
- def run(self):
+ def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(5,3,1)
-
+
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/6-cluster/clusterCommonCheck.py b/tests/system-test/6-cluster/clusterCommonCheck.py
index b758e6e71f..196b362f45 100644
--- a/tests/system-test/6-cluster/clusterCommonCheck.py
+++ b/tests/system-test/6-cluster/clusterCommonCheck.py
@@ -48,10 +48,10 @@ class ClusterComCheck:
if tdSql.queryResult[i][4] == "ready":
status+=1
tdLog.info(status)
-
+
if status == dnodeNumbers:
tdLog.success("it find cluster with %d dnodes and check that all cluster dnodes are ready within 30s! " %dnodeNumbers)
- return True
+ return True
count+=1
time.sleep(1)
else:
@@ -77,15 +77,15 @@ class ClusterComCheck:
def checkDb(self,dbNumbers,restartNumber,dbNameIndex):
count=0
alldbNumbers=(dbNumbers*restartNumber)+2
- while count < 5:
+ while count < 5:
query_status=0
for j in range(dbNumbers):
for i in range(alldbNumbers):
tdSql.query("show databases;")
- if "%s_%d"%(dbNameIndex,j) == tdSql.queryResult[i][0] :
+ if "%s_%d"%(dbNameIndex,j) == tdSql.queryResult[i][0] :
if tdSql.queryResult[i][15] == "ready":
query_status+=1
- tdLog.debug("check %s_%d that status is ready "%(dbNameIndex,j))
+ tdLog.debug("check %s_%d that status is ready "%(dbNameIndex,j))
else:
continue
# print(query_status)
@@ -107,7 +107,7 @@ class ClusterComCheck:
for i in range(stableCount):
tdSql.query("select count(*) from %s%d"%(stbname,i))
tdSql.checkData(0,0,rowsPerSTable)
- return
+ return
def checkMnodeStatus(self,mnodeNums):
self.mnodeNums=int(mnodeNums)
@@ -118,15 +118,15 @@ class ClusterComCheck:
while count < 10:
time.sleep(1)
tdSql.query("show mnodes;")
- if tdSql.checkRows(self.mnodeNums) :
+ if tdSql.checkRows(self.mnodeNums) :
tdLog.success("cluster has %d mnodes" %self.mnodeNums )
if self.mnodeNums == 1:
if tdSql.queryResult[0][2]== 'leader' and tdSql.queryResult[0][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
- return True
- count+=1
- elif self.mnodeNums == 3 :
+ return True
+ count+=1
+ elif self.mnodeNums == 3 :
if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' :
if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' :
@@ -141,9 +141,9 @@ class ClusterComCheck:
if tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' :
if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
- return True
+ return True
count+=1
- elif self.mnodeNums == 2 :
+ elif self.mnodeNums == 2 :
if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' :
if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' :
tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums)
@@ -157,7 +157,7 @@ class ClusterComCheck:
tdLog.debug(tdSql.queryResult)
tdLog.exit("cluster of %d mnodes is not ready in 10s " %self.mnodeNums)
-
+
def check3mnodeoff(self,offlineDnodeNo,mnodeNums=3):
@@ -224,7 +224,7 @@ class ClusterComCheck:
else:
tdLog.debug(tdSql.queryResult)
tdLog.exit("stop mnodes on dnode %d failed in 10s ")
-
+
diff --git a/tests/system-test/6-cluster/clusterCommonCreate.py b/tests/system-test/6-cluster/clusterCommonCreate.py
index 667e5e383e..299829144e 100644
--- a/tests/system-test/6-cluster/clusterCommonCreate.py
+++ b/tests/system-test/6-cluster/clusterCommonCreate.py
@@ -37,23 +37,23 @@ class ClusterComCreate:
tdSql.init(conn.cursor())
# tdSql.init(conn.cursor(), logSql) # output sql.txt file
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
- tdSql.query("drop table if exists %s.notifyinfo "%(cdbName))
+ tdSql.query("drop table if exists %s.notifyinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
tdSql.query("create table %s.notifyinfo (ts timestamp, cmdid int, consumerid int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -68,11 +68,11 @@ class ClusterComCreate:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -82,14 +82,14 @@ class ClusterComCreate:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -142,7 +142,7 @@ class ClusterComCreate:
tdLog.debug("create table if not exists %s.%s_%d (ts timestamp, c1 int, c2 int, c3 binary(16)) tags(t1 int, t2 binary(32))"%(dbNameIndex, stbNameIndex,i))
tsql.execute("create table if not exists %s.%s_%d (ts timestamp, c1 int, c2 int, c3 binary(16)) tags(t1 int, t2 binary(32))"%(dbNameIndex, stbNameIndex,i))
tdLog.debug("complete to create %s.%s_%d" %(dbNameIndex, stbNameIndex,i))
- return
+ return
def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1):
tsql.execute("use %s" %dbName)
@@ -153,14 +153,14 @@ class ClusterComCreate:
tagValue = 'beijing'
if (i % 2 == 0):
tagValue = 'shanghai'
-
+
sql += " %s_%d using %s tags(%d, '%s')"%(ctbPrefix,i,stbName,i+1, tagValue)
if (i > 0) and (i%100 == 0):
tsql.execute(sql)
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -189,7 +189,7 @@ class ClusterComCreate:
#print("insert sql:%s"%sql)
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
- return
+ return
def insert_data_1(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs):
tdLog.debug("start to insert data ............")
@@ -235,7 +235,7 @@ class ClusterComCreate:
ctbDict[i] = 0
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfCtb = 0
+ rowsOfCtb = 0
while rowsOfCtb < rowsPerTbl:
for i in range(ctbNum):
sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
@@ -262,7 +262,7 @@ class ClusterComCreate:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
for j in range(rowsPerTbl):
@@ -294,7 +294,7 @@ class ClusterComCreate:
for i in range(ctbNum):
tbName = '%s%s'%(ctbPrefix,i)
tdCom.insert_rows(tsql,dbname=paraDict["dbName"],tbname=tbName,start_ts_value=paraDict['startTs'],count=paraDict['rowsPerTbl'])
- return
+ return
def threadFunction(self, **paraDict):
# create new connector for new tdSql instance in my thread
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py
index 14494f1171..e93b13278b 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -95,7 +95,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@@ -126,7 +126,7 @@ class TDTestCase:
return taos.connect(host=host, port=int(port), config=config_dir)
- def run(self):
+ def run(self):
self.check_setup_cluster_status()
self.create_db_check_vgroups()
@@ -135,4 +135,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py
index 9a21dab855..7638d8227f 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -26,9 +26,9 @@ class TDTestCase:
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
- self.replica = 1
+ self.replica = 1
self.vgroups = 2
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
def getBuildPath(self):
@@ -101,7 +101,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@@ -145,7 +145,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(tb_nums):
sub_tbname = "sub_tb_{}".format(i)
tdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i))
@@ -164,7 +164,7 @@ class TDTestCase:
tdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1'))
tdSql.checkRows(tb_nums)
- def run(self):
+ def run(self):
self.check_setup_cluster_status()
self.create_db_check_vgroups()
self.create_db_replica_1_insertdatas(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)
@@ -176,4 +176,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py
index 0b6ab8721a..5d112f4352 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -26,9 +26,9 @@ class TDTestCase:
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
- self.replica = 3
+ self.replica = 3
self.vgroups = 2
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
def getBuildPath(self):
@@ -101,7 +101,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@@ -145,7 +145,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(tb_nums):
sub_tbname = "sub_tb_{}".format(i)
tdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i))
@@ -164,7 +164,7 @@ class TDTestCase:
tdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1'))
tdSql.checkRows(tb_nums)
- def run(self):
+ def run(self):
self.check_setup_cluster_status()
self.create_db_check_vgroups()
self.create_db_replica_3_insertdatas(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)
@@ -176,4 +176,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py
index 3d6b548bdd..ce37f09d7d 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
- self.replica = 3
+ self.replica = 3
self.vgroups = 1
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@@ -110,7 +110,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@@ -142,7 +142,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
-
+
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@@ -151,7 +151,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
-
+
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@@ -162,11 +162,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
-
+
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
-
+
tdSql.execute("use {}".format(dbname))
-
+
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@@ -174,9 +174,9 @@ class TDTestCase:
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
-
+
tdSql.execute("use {}".format(dbname))
-
+
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@@ -184,8 +184,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-
- count = 0
+
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@@ -199,14 +199,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.debug(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
-
+
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
- count = 0
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@@ -224,7 +224,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
- leader_infos = vgroup_info[3:-4]
+ leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
@@ -236,7 +236,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
@@ -249,7 +249,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="offline":
@@ -259,7 +259,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@@ -271,7 +271,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="ready":
@@ -365,7 +365,7 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
-
+
def sync_run_case(self):
# stop follower and insert datas , update tables and create new stables
tdDnodes=cluster.dnodes
@@ -375,19 +375,19 @@ class TDTestCase:
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
-
+
# check rows of datas
-
+
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
-
- # begin stop dnode
+
+ # begin stop dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].stoptaosd()
-
+
self.wait_stop_dnode_OK()
- # append rows of stablename when dnode stop
-
+ # append rows of stablename when dnode stop
+
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
@@ -400,20 +400,20 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
- # begin start dnode
+ # begin start dnode
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
- # create new stables again
+
+ # create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
-
+
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
@@ -427,18 +427,18 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
-
+
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
+
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
-
+
'''
- in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
+ in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
'''
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
@@ -449,7 +449,7 @@ class TDTestCase:
tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
- # create sync threading and start it
+ # create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
@@ -468,7 +468,7 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
- # create new stables again
+ # create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
@@ -477,7 +477,7 @@ class TDTestCase:
self.current_thread.join()
- def run(self):
+ def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
@@ -485,7 +485,7 @@ class TDTestCase:
self.sync_run_case()
# self.unsync_run_case()
-
+
def stop(self):
@@ -493,4 +493,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py
index 8ec6349879..8869b5d3a9 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
- self.replica = 3
+ self.replica = 3
self.vgroups = 1
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@@ -110,7 +110,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@@ -142,7 +142,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
-
+
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@@ -151,7 +151,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
-
+
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@@ -162,11 +162,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
-
+
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
-
+
tdSql.execute("use {}".format(dbname))
-
+
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@@ -174,9 +174,9 @@ class TDTestCase:
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
-
+
tdSql.execute("use {}".format(dbname))
-
+
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@@ -184,8 +184,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-
- count = 0
+
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@@ -199,14 +199,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
-
+
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
- count = 0
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@@ -220,12 +220,12 @@ class TDTestCase:
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
-
+
def _get_stop_dnode_id(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
- leader_infos = vgroup_info[3:-4]
+ leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
@@ -237,7 +237,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
@@ -250,7 +250,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="offline":
@@ -260,7 +260,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@@ -272,7 +272,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="ready":
@@ -366,7 +366,7 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
-
+
def sync_run_case(self):
# stop follower and insert datas , update tables and create new stables
tdDnodes=cluster.dnodes
@@ -376,19 +376,19 @@ class TDTestCase:
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
-
+
# check rows of datas
-
+
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
-
- # begin stop dnode
+
+ # begin stop dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].stoptaosd()
-
+
self.wait_stop_dnode_OK()
- # append rows of stablename when dnode stop
-
+ # append rows of stablename when dnode stop
+
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
@@ -401,20 +401,20 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
- # begin start dnode
+ # begin start dnode
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
- # create new stables again
+
+ # create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
-
+
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
@@ -428,18 +428,18 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
-
+
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
+
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
-
+
'''
- in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
+ in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
'''
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
@@ -450,7 +450,7 @@ class TDTestCase:
tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
- # create sync threading and start it
+ # create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
@@ -469,7 +469,7 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
- # create new stables again
+ # create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
@@ -478,7 +478,7 @@ class TDTestCase:
self.current_thread.join()
- def run(self):
+ def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
@@ -486,7 +486,7 @@ class TDTestCase:
# self.sync_run_case()
self.unsync_run_case()
-
+
def stop(self):
@@ -494,4 +494,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py
index fa7e5292de..0241cb5e32 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
- self.replica = 3
+ self.replica = 3
self.vgroups = 1
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@@ -110,7 +110,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@@ -142,7 +142,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
-
+
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@@ -151,7 +151,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
-
+
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@@ -162,11 +162,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
-
+
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
-
+
tdSql.execute("use {}".format(dbname))
-
+
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@@ -176,7 +176,7 @@ class TDTestCase:
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
-
+
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@@ -184,8 +184,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-
- count = 0
+
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@@ -199,14 +199,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
-
+
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
- count = 0
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@@ -225,7 +225,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
- leader_infos = vgroup_info[3:-4]
+ leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
@@ -237,7 +237,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
@@ -250,7 +250,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="offline":
@@ -260,7 +260,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@@ -272,7 +272,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="ready":
@@ -366,7 +366,7 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
-
+
def sync_run_case(self):
# stop follower and insert datas , update tables and create new stables
tdDnodes=cluster.dnodes
@@ -376,19 +376,19 @@ class TDTestCase:
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
-
+
# check rows of datas
-
+
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
-
- # begin stop dnode
+
+ # begin stop dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].forcestop()
-
+
self.wait_stop_dnode_OK()
- # append rows of stablename when dnode stop
-
+ # append rows of stablename when dnode stop
+
tbname = "sub_{}_{}".format(stablename , 0)
tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
@@ -401,20 +401,20 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
- # begin start dnode
+ # begin start dnode
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
- # create new stables again
+
+ # create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
-
+
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
@@ -425,26 +425,26 @@ class TDTestCase:
time.sleep(0.5)
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
# begin restart dnode
-
- # force stop taosd by kill -9
+
+ # force stop taosd by kill -9
self.force_stop_dnode(self.stop_dnode_id)
self.wait_stop_dnode_OK()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
-
+
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
+
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
-
+
'''
- in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
+ in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
'''
tdDnodes=cluster.dnodes
for loop in range(self.loop_restart_times):
@@ -455,7 +455,7 @@ class TDTestCase:
tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
- # create sync threading and start it
+ # create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
@@ -474,7 +474,7 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
- # create new stables again
+ # create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
@@ -488,7 +488,7 @@ class TDTestCase:
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
- port = dnode_info[1].split(":")[-1]
+ port = dnode_info[1].split(":")[-1]
break
else:
continue
@@ -502,7 +502,7 @@ class TDTestCase:
os.system(ps_kill_taosd)
- def run(self):
+ def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
@@ -510,7 +510,7 @@ class TDTestCase:
# self.sync_run_case()
self.unsync_run_case()
-
+
def stop(self):
@@ -518,4 +518,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py
index 2e4b299fc6..df865e51fc 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -28,9 +28,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
- self.replica = 3
+ self.replica = 3
self.vgroups = 1
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 10
@@ -182,7 +182,7 @@ class TDTestCase:
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
for vgroup_info in vgroup_infos:
- leader_infos = vgroup_info[3:-4]
+ leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
@@ -194,7 +194,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
@@ -207,7 +207,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="offline":
@@ -217,7 +217,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@@ -229,7 +229,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="ready":
@@ -239,7 +239,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def get_leader_infos(self ,dbname):
-
+
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
@@ -259,7 +259,7 @@ class TDTestCase:
tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
- for ind , role in enumerate(vgroup_info):
+ for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@@ -276,25 +276,25 @@ class TDTestCase:
benchmark_build_path = self.getBuildPath() + '/build/bin/taosBenchmark'
tdLog.notice("==== start taosBenchmark insert datas of database {} ==== ".format(dbname))
os.system(" {} -f {} >>/dev/null 2>&1 ".format(benchmark_build_path , json_file))
-
+
def stop_leader_when_Benchmark_inserts(self,dbname , total_rows , json_file ):
# stop follower and insert datas , update tables and create new stables
tdDnodes=cluster.dnodes
tdSql.execute(" drop database if exists {} ".format(dbname))
tdSql.execute(" create database {} replica {} vgroups {}".format(dbname , self.replica , self.vgroups))
-
- # start insert datas using taosBenchmark ,expect insert 10000 rows
-
+
+ # start insert datas using taosBenchmark ,expect insert 10000 rows
+
self.current_thread = threading.Thread(target=self.start_benchmark_inserts, args=(dbname,json_file))
self.current_thread.start()
tdSql.query(" show databases ")
-
- # make sure create database ok
+
+ # make sure create database ok
while (tdSql.queryRows!=3):
time.sleep(0.5)
tdSql.query(" show databases ")
- # # make sure create stable ok
+ # # make sure create stable ok
tdSql.query(" show {}.stables ".format(dbname))
while (tdSql.queryRows!=1):
time.sleep(0.5)
@@ -313,14 +313,14 @@ class TDTestCase:
tdLog.debug(" === current insert {} rows in database {} === ".format(tdSql.queryResult[0][0] , dbname))
time.sleep(0.01)
tdSql.query(" select count(*) from {}.{} ".format(dbname,"stb1"))
-
+
tdLog.debug(" === database {} has write {} rows at least ====".format(dbname,total_rows/10))
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
- # prepare stop leader of database
+ # prepare stop leader of database
before_leader_infos = self.get_leader_infos(dbname)
-
+
tdDnodes[self.stop_dnode_id-1].stoptaosd()
# self.current_thread.join()
after_leader_infos = self.get_leader_infos(dbname)
@@ -331,7 +331,7 @@ class TDTestCase:
after_leader_infos = self.get_leader_infos(dbname)
revote_status = self.check_revote_leader_success(dbname ,before_leader_infos , after_leader_infos)
end = time.time()
- time_cost = end - start
+ time_cost = end - start
tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(dbname , time_cost))
self.current_thread.join()
@@ -344,7 +344,7 @@ class TDTestCase:
- def run(self):
+ def run(self):
# basic insert and check of cluster
# self.check_setup_cluster_status()
@@ -359,4 +359,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py
index c19a308f1c..8d6f026165 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -28,9 +28,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
- self.replica = 3
+ self.replica = 3
self.vgroups = 1
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 10
@@ -193,7 +193,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@@ -225,7 +225,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
-
+
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@@ -234,8 +234,8 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
-
- for i in range(tb_nums):
+
+ for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert datas about new database
@@ -245,11 +245,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
-
+
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
-
+
tdSql.execute("use {}".format(dbname))
-
+
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@@ -257,9 +257,9 @@ class TDTestCase:
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
-
+
tdSql.execute("use {}".format(dbname))
-
+
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@@ -267,8 +267,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-
- count = 0
+
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@@ -282,14 +282,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {} ====".format(count , dbname))
count += 1
-
+
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
- count = 0
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@@ -309,7 +309,7 @@ class TDTestCase:
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
for vgroup_info in vgroup_infos:
- leader_infos = vgroup_info[3:-4]
+ leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
@@ -321,7 +321,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
@@ -334,7 +334,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="offline":
@@ -344,7 +344,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@@ -356,7 +356,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="ready":
@@ -366,7 +366,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def get_leader_infos(self ,dbname):
-
+
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
@@ -386,7 +386,7 @@ class TDTestCase:
tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
- for ind , role in enumerate(vgroup_info):
+ for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@@ -400,12 +400,12 @@ class TDTestCase:
return check_status
def force_stop_dnode(self, dnode_id ):
-
+
tdSql.query("show dnodes")
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
- port = dnode_info[1].split(":")[-1]
+ port = dnode_info[1].split(":")[-1]
break
else:
continue
@@ -427,25 +427,25 @@ class TDTestCase:
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
self.stop_dnode_id = self._get_stop_dnode_id(db_name)
-
+
# check rows of datas
-
+
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
- # get leader info before stop
+ # get leader info before stop
before_leader_infos = self.get_leader_infos(db_name)
- # begin stop dnode
- # force stop taosd by kill -9
+ # begin stop dnode
+ # force stop taosd by kill -9
self.force_stop_dnode(self.stop_dnode_id)
self.wait_stop_dnode_OK()
# vote leaders check
- # get leader info after stop
+ # get leader info after stop
after_leader_infos = self.get_leader_infos(db_name)
-
+
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
# append rows of stablename when dnode stop make sure revote leaders
@@ -470,7 +470,7 @@ class TDTestCase:
else:
tdLog.notice("===== leader of database {} is not ok , append rows fail =====".format(db_name))
- # begin start dnode
+ # begin start dnode
start = time.time()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
@@ -478,29 +478,29 @@ class TDTestCase:
time_cost = int(end -start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
- # create new stables again
+
+ # create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
-
+
def unsync_run_case(self):
def _restart_dnode_of_db_unsync(dbname):
-
+
tdDnodes=cluster.dnodes
self.stop_dnode_id = self._get_stop_dnode_id(dbname)
# begin restart dnode
- # force stop taosd by kill -9
- # get leader info before stop
+ # force stop taosd by kill -9
+ # get leader info before stop
before_leader_infos = self.get_leader_infos(db_name)
self.force_stop_dnode(self.stop_dnode_id)
self.wait_stop_dnode_OK()
# check revote leader when restart servers
- # get leader info after stop
+ # get leader info after stop
after_leader_infos = self.get_leader_infos(db_name)
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
# append rows of stablename when dnode stop make sure revote leaders
@@ -520,30 +520,30 @@ class TDTestCase:
tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
- # create new stables again
+ # create new stables again
tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
-
+
tdDnodes[self.stop_dnode_id-1].starttaosd()
start = time.time()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
-
+
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
+
def _create_threading(dbname):
self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,))
return self.current_thread
-
+
'''
- in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
+ in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive
'''
for loop in range(self.loop_restart_times):
db_name = "unsync_db_{}".format(loop)
@@ -553,7 +553,7 @@ class TDTestCase:
tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
- # create sync threading and start it
+ # create sync threading and start it
self.current_thread = _create_threading(db_name)
self.current_thread.start()
@@ -564,7 +564,7 @@ class TDTestCase:
self.current_thread.join()
- def run(self):
+ def run(self):
# basic insert and check of cluster
self.check_setup_cluster_status()
@@ -577,4 +577,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py
index 2bfe544749..4b404a5906 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
- self.replica = 3
+ self.replica = 3
self.vgroups = 1
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@@ -40,7 +40,7 @@ class TDTestCase:
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
-
+
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@@ -112,7 +112,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
-
+
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@@ -123,11 +123,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
-
+
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
-
+
tdSql.execute("use {}".format(dbname))
-
+
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@@ -137,7 +137,7 @@ class TDTestCase:
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
-
+
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@@ -145,8 +145,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-
- count = 0
+
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@@ -160,14 +160,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
-
+
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
- count = 0
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@@ -186,7 +186,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
- leader_infos = vgroup_info[3:-4]
+ leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
@@ -198,7 +198,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
@@ -211,7 +211,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="offline":
@@ -221,7 +221,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@@ -233,7 +233,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="ready":
@@ -327,7 +327,7 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
-
+
def force_stop_dnode(self, dnode_id ):
@@ -335,7 +335,7 @@ class TDTestCase:
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
- port = dnode_info[1].split(":")[-1]
+ port = dnode_info[1].split(":")[-1]
break
else:
continue
@@ -349,9 +349,9 @@ class TDTestCase:
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
-
+
sql = "select * from {}.{} ;".format(dbname , stablename)
-
+
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
@@ -364,18 +364,18 @@ class TDTestCase:
self.thread_list.append(task)
for thread in self.thread_list:
-
+
thread.start()
return self.thread_list
def stop_follower_when_query_going(self):
-
+
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
- # let query task start
+ # let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
@@ -390,22 +390,22 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
-
+
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
+
for thread in self.thread_list:
thread.join()
- def run(self):
+ def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
-
+
def stop(self):
@@ -413,4 +413,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py
index 2a4e43d904..8310522bd9 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
- self.replica = 3
+ self.replica = 3
self.vgroups = 1
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@@ -40,7 +40,7 @@ class TDTestCase:
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
-
+
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@@ -112,7 +112,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
-
+
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@@ -123,11 +123,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
-
+
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
-
+
tdSql.execute("use {}".format(dbname))
-
+
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@@ -137,7 +137,7 @@ class TDTestCase:
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
-
+
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@@ -145,8 +145,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-
- count = 0
+
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@@ -160,14 +160,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
-
+
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
- count = 0
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@@ -186,7 +186,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
- leader_infos = vgroup_info[3:-4]
+ leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
@@ -198,7 +198,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
@@ -211,7 +211,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="offline":
@@ -221,7 +221,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@@ -233,7 +233,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="ready":
@@ -327,7 +327,7 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
-
+
def force_stop_dnode(self, dnode_id ):
@@ -335,7 +335,7 @@ class TDTestCase:
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
- port = dnode_info[1].split(":")[-1]
+ port = dnode_info[1].split(":")[-1]
break
else:
continue
@@ -349,9 +349,9 @@ class TDTestCase:
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
-
+
sql = "select * from {}.{} ;".format(dbname , stablename)
-
+
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
@@ -364,18 +364,18 @@ class TDTestCase:
self.thread_list.append(task)
for thread in self.thread_list:
-
+
thread.start()
return self.thread_list
def stop_follower_when_query_going(self):
-
+
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
- # let query task start
+ # let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
@@ -390,22 +390,22 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
-
+
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
+
for thread in self.thread_list:
thread.join()
- def run(self):
+ def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
-
+
def stop(self):
@@ -413,4 +413,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py
index 41606946f6..752a347d83 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
- self.replica = 3
+ self.replica = 3
self.vgroups = 1
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@@ -40,7 +40,7 @@ class TDTestCase:
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
-
+
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@@ -112,7 +112,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
-
+
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@@ -123,11 +123,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
-
+
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
-
+
tdSql.execute("use {}".format(dbname))
-
+
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@@ -137,7 +137,7 @@ class TDTestCase:
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
-
+
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@@ -145,8 +145,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-
- count = 0
+
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@@ -160,14 +160,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
-
+
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
- count = 0
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@@ -186,7 +186,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
- leader_infos = vgroup_info[3:-4]
+ leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
@@ -198,7 +198,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
@@ -211,7 +211,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="offline":
@@ -229,7 +229,7 @@ class TDTestCase:
tdLog.info(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
- for ind , role in enumerate(vgroup_info):
+ for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@@ -243,7 +243,7 @@ class TDTestCase:
return check_status
def wait_start_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@@ -255,7 +255,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="ready":
@@ -349,10 +349,10 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
-
-
+
+
def get_leader_infos(self ,dbname):
-
+
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
@@ -369,7 +369,7 @@ class TDTestCase:
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
- port = dnode_info[1].split(":")[-1]
+ port = dnode_info[1].split(":")[-1]
break
else:
continue
@@ -383,9 +383,9 @@ class TDTestCase:
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
-
+
sql = "select * from {}.{} ;".format(dbname , stablename)
-
+
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
@@ -398,35 +398,35 @@ class TDTestCase:
self.thread_list.append(task)
for thread in self.thread_list:
-
+
thread.start()
return self.thread_list
def stop_follower_when_query_going(self):
-
+
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
- # let query task start
+ # let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
for loop in range(self.loop_restart_times):
tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name))
- # get leader info before stop
+ # get leader info before stop
before_leader_infos = self.get_leader_infos(self.db_name)
self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
tdDnodes[self.stop_dnode_id-1].stoptaosd()
-
+
start = time.time()
- # get leader info after stop
+ # get leader info after stop
after_leader_infos = self.get_leader_infos(self.db_name)
-
+
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
while not revote_status:
@@ -434,7 +434,7 @@ class TDTestCase:
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
end = time.time()
- time_cost = end - start
+ time_cost = end - start
tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name , time_cost))
self.wait_stop_dnode_OK()
@@ -444,22 +444,22 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
-
+
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
+
for thread in self.thread_list:
thread.join()
- def run(self):
+ def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
-
+
def stop(self):
@@ -467,4 +467,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py
index 5ddcf1c70e..9f64faa446 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -30,9 +30,9 @@ class TDTestCase:
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
- self.replica = 3
+ self.replica = 3
self.vgroups = 1
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
@@ -40,7 +40,7 @@ class TDTestCase:
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
-
+
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
@@ -112,7 +112,7 @@ class TDTestCase:
tags (t1 int)
'''.format(stablename)
)
-
+
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
@@ -123,11 +123,11 @@ class TDTestCase:
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
-
+
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
-
+
tdSql.execute("use {}".format(dbname))
-
+
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
@@ -137,7 +137,7 @@ class TDTestCase:
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
-
+
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
@@ -145,8 +145,8 @@ class TDTestCase:
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-
- count = 0
+
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
@@ -160,14 +160,14 @@ class TDTestCase:
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
-
+
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
- count = 0
+ count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
@@ -186,7 +186,7 @@ class TDTestCase:
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
- leader_infos = vgroup_info[3:-4]
+ leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
@@ -198,7 +198,7 @@ class TDTestCase:
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
@@ -211,7 +211,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="offline":
@@ -229,7 +229,7 @@ class TDTestCase:
tdLog.info(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
- for ind , role in enumerate(vgroup_info):
+ for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@@ -243,7 +243,7 @@ class TDTestCase:
return check_status
def wait_start_dnode_OK(self):
-
+
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
@@ -255,7 +255,7 @@ class TDTestCase:
if id == self.stop_dnode_id:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="ready":
@@ -349,10 +349,10 @@ class TDTestCase:
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
-
-
+
+
def get_leader_infos(self ,dbname):
-
+
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
@@ -369,7 +369,7 @@ class TDTestCase:
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
- port = dnode_info[1].split(":")[-1]
+ port = dnode_info[1].split(":")[-1]
break
else:
continue
@@ -383,9 +383,9 @@ class TDTestCase:
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
-
+
sql = "select * from {}.{} ;".format(dbname , stablename)
-
+
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
@@ -398,35 +398,35 @@ class TDTestCase:
self.thread_list.append(task)
for thread in self.thread_list:
-
+
thread.start()
return self.thread_list
def stop_follower_when_query_going(self):
-
+
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
- # let query task start
+ # let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
for loop in range(self.loop_restart_times):
tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name))
- # get leader info before stop
+ # get leader info before stop
before_leader_infos = self.get_leader_infos(self.db_name)
self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
self.force_stop_dnode(self.stop_dnode_id)
-
+
start = time.time()
- # get leader info after stop
+ # get leader info after stop
after_leader_infos = self.get_leader_infos(self.db_name)
-
+
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
while not revote_status:
@@ -434,7 +434,7 @@ class TDTestCase:
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
end = time.time()
- time_cost = end - start
+ time_cost = end - start
tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name , time_cost))
self.wait_stop_dnode_OK()
@@ -444,22 +444,22 @@ class TDTestCase:
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
-
+
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-
+
for thread in self.thread_list:
thread.join()
- def run(self):
+ def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
-
+
def stop(self):
@@ -467,4 +467,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py
index 78f6ee153b..ff7f84a29d 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -25,9 +25,9 @@ class TDTestCase:
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
- self.replica = 1
+ self.replica = 1
self.vgroups = 2
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.max_vote_time_cost = 10 # seconds
@@ -101,7 +101,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@@ -134,7 +134,7 @@ class TDTestCase:
vgroup_id = vgroup_info[0]
vgroup_status = []
for ind , role in enumerate(vgroup_info[3:-4]):
-
+
if ind%2==0:
continue
else:
@@ -151,7 +151,7 @@ class TDTestCase:
while not status:
time.sleep(0.1)
status = self.check_vgroups_init_done(dbname)
-
+
# tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time()
cost_time = end - start
@@ -159,16 +159,16 @@ class TDTestCase:
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
-
-
+
+
return cost_time
-
+
def test_init_vgroups_time_costs(self):
tdLog.notice(" ====start check time cost about vgroups vote leaders ==== ")
tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
- # create database replica 3 vgroups 1
+ # create database replica 3 vgroups 1
db1 = 'db_1'
create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
@@ -189,10 +189,10 @@ class TDTestCase:
tdLog.notice('=======database {} replica 3 vgroups 100 ======'.format(db3))
tdSql.execute(create_db_replica_3_vgroups_100)
self.vote_leader_time_costs(db3)
-
-
- def run(self):
+
+
+ def run(self):
self.check_setup_cluster_status()
self.test_init_vgroups_time_costs()
@@ -203,4 +203,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py
index 32ee0a8711..97a497dfe9 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py
@@ -4,7 +4,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
-import os
+import os
from util.log import *
from util.sql import *
@@ -13,7 +13,7 @@ from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
-import time
+import time
import random
import socket
import subprocess
@@ -27,9 +27,9 @@ class TDTestCase:
self.dnode_list = {}
self.ts = 1483200000000
self.db_name ='testdb'
- self.replica = 1
+ self.replica = 1
self.vgroups = 2
- self.tb_nums = 10
+ self.tb_nums = 10
self.row_nums = 100
self.max_vote_time_cost = 10 # seconds
self.stop_dnode = None
@@ -104,7 +104,7 @@ class TDTestCase:
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
-
+
for i in range(5):
tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
tdSql.query("show stables")
@@ -133,7 +133,7 @@ class TDTestCase:
self.stop_dnode = random.sample(only_dnode_list , 1 )[0]
return self.stop_dnode
-
+
def check_vgroups_revote_leader(self,dbname):
status = True
@@ -145,7 +145,7 @@ class TDTestCase:
vgroup_status = []
vgroups_leader_follower = vgroup_info[3:-4]
for ind , role in enumerate(vgroups_leader_follower):
-
+
if ind%2==0:
if role == stop_dnode_id and vgroups_leader_follower[ind+1]=="offline":
tdLog.notice("====== dnode {} has offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode))
@@ -174,7 +174,7 @@ class TDTestCase:
if endpoint == self.stop_dnode:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="offline":
@@ -184,7 +184,7 @@ class TDTestCase:
tdLog.notice("==== stop_dnode has stopped , endpoint is {}".format(self.stop_dnode))
def wait_start_dnode_OK(self):
-
+
def _get_status():
status = ""
@@ -196,7 +196,7 @@ class TDTestCase:
if endpoint == self.stop_dnode:
status = dnode_status
break
- return status
+ return status
status = _get_status()
while status !="ready":
@@ -205,8 +205,8 @@ class TDTestCase:
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.notice("==== stop_dnode has restart , endpoint is {}".format(self.stop_dnode))
-
-
+
+
def random_stop_One_dnode(self):
self.stop_dnode = self._get_stop_dnode()
stop_dnode_id = self.dnode_list[self.stop_dnode][0]
@@ -217,7 +217,7 @@ class TDTestCase:
# os.system("taos -s 'show dnodes;'")
def Restart_stop_dnode(self):
-
+
tdDnodes=cluster.dnodes
stop_dnode_id = self.dnode_list[self.stop_dnode][0]
tdDnodes[stop_dnode_id-1].starttaosd()
@@ -225,7 +225,7 @@ class TDTestCase:
# os.system("taos -s 'show dnodes;'")
def check_vgroups_init_done(self,dbname):
-
+
status = True
tdSql.query("show {}.vgroups".format(dbname))
@@ -233,7 +233,7 @@ class TDTestCase:
vgroup_id = vgroup_info[0]
vgroup_status = []
for ind , role in enumerate(vgroup_info[3:-4]):
-
+
if ind%2==0:
continue
else:
@@ -249,7 +249,7 @@ class TDTestCase:
while not status:
time.sleep(0.1)
status = self.check_vgroups_init_done(dbname)
-
+
# tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time()
cost_time = end - start
@@ -257,10 +257,10 @@ class TDTestCase:
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
-
+
return cost_time
-
+
def revote_leader_time_costs(self,dbname):
start = time.time()
@@ -268,7 +268,7 @@ class TDTestCase:
while not status:
time.sleep(0.1)
status = self.check_vgroups_revote_leader(dbname)
-
+
# tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time()
cost_time = end - start
@@ -276,10 +276,10 @@ class TDTestCase:
# os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s revote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
-
-
+
+
return cost_time
-
+
def exec_revote_action(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
@@ -296,13 +296,13 @@ class TDTestCase:
after_vgroups = set()
for vgroup_info in after_revote:
after_vgroups.add(vgroup_info[3:-4])
-
+
vote_act = set(set(after_vgroups)-set(before_vgroups))
if not vote_act:
tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
- for ind , role in enumerate(vgroup_info):
+ for ind , role in enumerate(vgroup_info):
if role==self.dnode_list[self.stop_dnode][0]:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@@ -322,7 +322,7 @@ class TDTestCase:
tdLog.notice(" ====start check time cost about vgroups vote leaders ==== ")
tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
- # create database replica 3 vgroups 1
+ # create database replica 3 vgroups 1
db1 = 'db_1'
create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
@@ -346,13 +346,13 @@ class TDTestCase:
tdSql.execute(create_db_replica_3_vgroups_100)
self.vote_leader_time_costs(db3)
self.exec_revote_action(db3)
-
-
- def run(self):
+
+
+ def run(self):
self.check_setup_cluster_status()
self.test_init_vgroups_time_costs()
-
+
@@ -361,4 +361,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/basic5.py b/tests/system-test/7-tmq/basic5.py
index 70813eba96..94201a335d 100644
--- a/tests/system-test/7-tmq/basic5.py
+++ b/tests/system-test/7-tmq/basic5.py
@@ -56,7 +56,7 @@ class TDTestCase:
return cur
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
- tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
+ tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
@@ -69,7 +69,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
@@ -96,7 +96,7 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
+
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
@@ -115,7 +115,7 @@ class TDTestCase:
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
- parameterDict["startTs"])
+ parameterDict["startTs"])
return
@@ -135,34 +135,34 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
prepareEnvThread.join()
-
+
# wait stb ready
while 1:
- tdSql.query("show %s.stables"%parameterDict['dbName'])
- if tdSql.getRows() == 1:
+ tdSql.query("show %s.stables"%parameterDict['dbName'])
+ if tdSql.getRows() == 1:
break
else:
time.sleep(1)
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column'
- topicFromCtb = 'topic_ctb_column'
-
+ topicFromCtb = 'topic_ctb_column'
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
-
+
time.sleep(1)
tdSql.query("show topics")
#tdSql.checkRows(2)
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
-
+
tdLog.info("show topics: %s, %s"%(topic1, topic2))
if topic1 != topicFromStb and topic1 != topicFromCtb:
- tdLog.exit("topic error1")
+ tdLog.exit("topic error1")
if topic2 != topicFromStb and topic2 != topicFromCtb:
- tdLog.exit("topic error2")
-
+ tdLog.exit("topic error2")
+
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)")
@@ -179,7 +179,7 @@ class TDTestCase:
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
tdSql.query(sql)
-
+
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
@@ -190,26 +190,26 @@ class TDTestCase:
break
else:
time.sleep(1)
-
+
tdLog.info("start consume processor")
pollDelay = 20
showMsg = 1
showRow = 1
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
- shellCmd += "> /dev/null 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+ shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
- os.system(shellCmd)
+ os.system(shellCmd)
# wait for data ready
# prepareEnvThread.join()
-
+
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
@@ -229,7 +229,7 @@ class TDTestCase:
tdSql.query("drop topic %s"%topicFromCtb)
tdLog.printNoPrefix("======== test case 1 end ...... ")
-
+
def tmqCase2(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 2: add child table with consuming ")
# create and start thread
@@ -246,13 +246,13 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
prepareEnvThread.join()
-
+
# wait db ready
while 1:
tdSql.query("show databases")
if tdSql.getRows() == 4:
print ('==================================================')
- print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0))
+ print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0))
index = 0
if tdSql.getData(0,0) == parameterDict['dbName']:
index = 0
@@ -264,7 +264,7 @@ class TDTestCase:
index = 3
else:
continue
-
+
if tdSql.getData(index,15) == 'ready':
print("******************** index: %d"%index)
break
@@ -272,12 +272,12 @@ class TDTestCase:
continue
else:
time.sleep(1)
-
+
tdSql.query("use %s"%parameterDict['dbName'])
# wait stb ready
while 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
- if tdSql.getRows() == 1:
+ if tdSql.getRows() == 1:
break
else:
time.sleep(1)
@@ -285,20 +285,20 @@ class TDTestCase:
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column2'
topicFromCtb = 'topic_ctb_column2'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
-
+
time.sleep(1)
tdSql.query("show topics")
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
tdLog.info("show topics: %s, %s"%(topic1, topic2))
if topic1 != topicFromStb and topic1 != topicFromCtb:
- tdLog.exit("topic error1")
+ tdLog.exit("topic error1")
if topic2 != topicFromStb and topic2 != topicFromCtb:
- tdLog.exit("topic error2")
-
+ tdLog.exit("topic error2")
+
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
@@ -316,7 +316,7 @@ class TDTestCase:
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
tdSql.query(sql)
-
+
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
@@ -327,21 +327,21 @@ class TDTestCase:
break
else:
time.sleep(1)
-
+
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
showRow = 1
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
- shellCmd += "> /dev/null 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+ shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
- os.system(shellCmd)
+ os.system(shellCmd)
# create new child table and insert data
newCtbName = 'newctb'
@@ -354,7 +354,7 @@ class TDTestCase:
# wait for data ready
prepareEnvThread.join()
-
+
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
@@ -366,7 +366,7 @@ class TDTestCase:
tdSql.checkData(0 , 1, consumerId)
tdSql.checkData(0 , 3, expectrowcnt)
-
+
tdSql.query("drop topic %s"%topicFromStb)
tdSql.query("drop topic %s"%topicFromCtb)
@@ -390,13 +390,13 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
prepareEnvThread.join()
-
+
# wait db ready
while 1:
tdSql.query("show databases")
- if tdSql.getRows() == 5:
+ if tdSql.getRows() == 5:
print ('==================================================dbname: %s'%parameterDict['dbName'])
- print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0),tdSql.getData(3,0),tdSql.getData(4,0))
+ print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0),tdSql.getData(3,0),tdSql.getData(4,0))
index = 0
if tdSql.getData(0,0) == parameterDict['dbName']:
index = 0
@@ -409,8 +409,8 @@ class TDTestCase:
elif tdSql.getData(4,0) == parameterDict['dbName']:
index = 4
else:
- continue
-
+ continue
+
if tdSql.getData(index,15) == 'ready':
print("******************** index: %d"%index)
break
@@ -418,16 +418,16 @@ class TDTestCase:
continue
else:
time.sleep(1)
-
+
tdSql.query("use %s"%parameterDict['dbName'])
# wait stb ready
while 1:
tdSql.query("show %s.stables"%parameterDict['dbName'])
- if tdSql.getRows() == 1:
+ if tdSql.getRows() == 1:
break
else:
time.sleep(1)
-
+
tdLog.info("create stable2 for the seconde topic")
parameterDict2 = {'cfg': '', \
'dbName': 'db3', \
@@ -439,23 +439,23 @@ class TDTestCase:
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict2['cfg'] = cfgPath
tdSql.execute("create stable if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict2['dbName'], parameterDict2['stbName']))
-
+
tdLog.info("create topics from super table")
topicFromStb = 'topic_stb_column3'
topicFromStb2 = 'topic_stb_column32'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict2['dbName'], parameterDict2['stbName']))
-
+
tdSql.query("show topics")
topic1 = tdSql.getData(0 , 0)
topic2 = tdSql.getData(1 , 0)
tdLog.info("show topics: %s, %s"%(topic1, topic2))
if topic1 != topicFromStb and topic1 != topicFromStb2:
- tdLog.exit("topic error1")
+ tdLog.exit("topic error1")
if topic2 != topicFromStb and topic2 != topicFromStb2:
- tdLog.exit("topic error2")
-
+ tdLog.exit("topic error2")
+
tdLog.info("create consume info table and consume result table")
cdbName = parameterDict["dbName"]
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
@@ -472,7 +472,7 @@ class TDTestCase:
sql = "insert into consumeinfo values "
sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
tdSql.query(sql)
-
+
tdLog.info("check stb if there are data")
while 1:
tdSql.query("select count(*) from %s"%parameterDict["stbName"])
@@ -483,22 +483,22 @@ class TDTestCase:
break
else:
time.sleep(1)
-
+
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
showRow = 1
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
- shellCmd += "> /dev/null 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+ shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
- os.system(shellCmd)
+ os.system(shellCmd)
# start the second thread to create new child table and insert data
prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
@@ -507,7 +507,7 @@ class TDTestCase:
# wait for data ready
prepareEnvThread.join()
prepareEnvThread2.join()
-
+
tdLog.info("insert process end, and start to check consume result")
while 1:
tdSql.query("select * from consumeresult")
@@ -519,7 +519,7 @@ class TDTestCase:
tdSql.checkData(0 , 1, consumerId)
tdSql.checkData(0 , 3, expectrowcnt)
-
+
tdSql.query("drop topic %s"%topicFromStb)
tdSql.query("drop topic %s"%topicFromStb2)
@@ -537,7 +537,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase1(cfgPath, buildPath)
- self.tmqCase2(cfgPath, buildPath)
+ self.tmqCase2(cfgPath, buildPath)
self.tmqCase3(cfgPath, buildPath)
def stop(self):
diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py b/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py
index 2216000214..4add73ec2b 100644
--- a/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py
+++ b/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -58,12 +58,12 @@ class TDTestCase:
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("flush db to let data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
return
@@ -93,18 +93,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- # tdSql.query(queryString)
+ # tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -121,29 +121,29 @@ class TDTestCase:
paraDict['batchNum'] = 100
paraDict['startTs'] = paraDict['startTs'] + self.rowsPerTbl
pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
-
+
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
pInsertThread.join()
-
- tdSql.query(queryString)
+
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
-
- tdLog.info("wait the consume result")
+
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
if expectRowsList[0] != resultList[0]:
tdLog.exit("%d tmq consume rows error!"%consumerId)
- # tmqCom.checkFileContent(consumerId, queryString)
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
for i in range(len(topicNameList)):
- tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
+ tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
@@ -173,18 +173,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -200,36 +200,36 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeRows = resultList[0]
-
+
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
if not (expectrowcnt <= actConsumeRows and totalRowsInserted >= actConsumeRows):
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
+
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeRows = resultList[0]
- tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
+ tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
if not ((actConsumeRows >= expectrowcnt) and (totalRowsInserted > actConsumeRows)):
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
+
for i in range(len(topicNameList)):
- tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
+ tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal.py b/tests/system-test/7-tmq/dataFromTsdbNWal.py
index faa70f4820..950c8fdcf6 100644
--- a/tests/system-test/7-tmq/dataFromTsdbNWal.py
+++ b/tests/system-test/7-tmq/dataFromTsdbNWal.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1
self.rowsPerTbl = 10000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -58,12 +58,12 @@ class TDTestCase:
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("flush db to let data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
return
@@ -93,18 +93,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- # tdSql.query(queryString)
+ # tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -121,29 +121,29 @@ class TDTestCase:
paraDict['batchNum'] = 100
paraDict['startTs'] = paraDict['startTs'] + self.rowsPerTbl
pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
-
+
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
pInsertThread.join()
-
- tdSql.query(queryString)
+
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
-
- tdLog.info("wait the consume result")
+
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
if expectRowsList[0] != resultList[0]:
tdLog.exit("%d tmq consume rows error!"%consumerId)
- tmqCom.checkFileContent(consumerId, queryString)
+ tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("flush database %s"%(paraDict['dbName']))
for i in range(len(topicNameList)):
- tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
+ tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
@@ -173,18 +173,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -200,36 +200,36 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeRows = resultList[0]
-
+
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
if not (expectrowcnt <= actConsumeRows and totalRowsInserted >= actConsumeRows):
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
+
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeRows = resultList[0]
- tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
+ tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
if not ((actConsumeRows >= expectrowcnt) and (totalRowsInserted > actConsumeRows)):
tdLog.exit("%d tmq consume rows error!"%consumerId)
for i in range(len(topicNameList)):
- tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
+ tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
diff --git a/tests/system-test/7-tmq/db.py b/tests/system-test/7-tmq/db.py
index 1fd0638d17..da5d7fefd2 100644
--- a/tests/system-test/7-tmq/db.py
+++ b/tests/system-test/7-tmq/db.py
@@ -56,12 +56,12 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("drop database if exists %s "%(cdbName))
tdSql.query("create database %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
- tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
+ tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
@@ -75,7 +75,7 @@ class TDTestCase:
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -90,11 +90,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -102,14 +102,14 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -139,7 +139,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -158,7 +158,7 @@ class TDTestCase:
ctbDict[i] = 0
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfCtb = 0
+ rowsOfCtb = 0
while rowsOfCtb < rowsPerTbl:
for i in range(ctbNum):
sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
@@ -185,7 +185,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(ctbPrefix,i)
for j in range(rowsPerTbl):
@@ -216,7 +216,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
for j in range(rowsPerTbl):
@@ -235,8 +235,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -255,7 +255,7 @@ class TDTestCase:
return
def tmqCase1(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 1: ")
+ tdLog.printNoPrefix("======== test case 1: ")
'''
subscribe one db, multi normal table which have not same schema, and include rows of all tables in one insert sql
'''
@@ -274,11 +274,11 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
tdSql.execute("create table %s.ntb0 (ts timestamp, c1 int)"%(parameterDict["dbName"]))
tdSql.execute("create table %s.ntb1 (ts timestamp, c1 int, c2 float)"%(parameterDict["dbName"]))
- tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(parameterDict["dbName"]))
+ tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(parameterDict["dbName"]))
tdSql.execute("create table %s.ntb3 (ts timestamp, c1 int, c2 float, c3 binary(32), c4 timestamp)"%(parameterDict["dbName"]))
tdSql.execute("insert into %s.ntb0 values(now, 1) %s.ntb1 values(now, 1, 1) %s.ntb2 values(now, 1, 1, '1') %s.ntb3 values(now, 1, 1, '1', now)"%(parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"]))
@@ -301,7 +301,7 @@ class TDTestCase:
tdLog.info("create topics from db")
topicFromDb = 'topic_db_mulit_tbl'
-
+
tdSql.execute("create topic %s as database %s" %(topicFromDb, parameterDict['dbName']))
consumerId = 0
expectrowcnt = numOfNtb * rowsOfPerNtb
@@ -324,7 +324,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -334,7 +334,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
'''
subscribe one stb, multi child talbe and normal table which have not same schema, and include rows of all tables in one insert sql
'''
@@ -355,7 +355,7 @@ class TDTestCase:
parameterDict['cfg'] = cfgPath
dbName = parameterDict["dbName"]
-
+
self.create_database(tdSql, dbName)
tdSql.execute("create stable %s.stb (ts timestamp, s1 bigint, s2 binary(32), s3 double) tags (t1 int, t2 binary(32))"%(dbName))
@@ -364,7 +364,7 @@ class TDTestCase:
tdSql.execute("create table %s.ntb0 (ts timestamp, c1 binary(32))"%(dbName))
tdSql.execute("create table %s.ntb1 (ts timestamp, c1 binary(32), c2 float)"%(dbName))
- tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(dbName))
+ tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(dbName))
tdSql.execute("create table %s.ntb3 (ts timestamp, c1 int, c2 float, c3 binary(32), c4 timestamp)"%(dbName))
tdSql.execute("insert into %s.ntb0 values(now, 'ntb0-11') \
@@ -401,7 +401,7 @@ class TDTestCase:
tdLog.info("create topics from db")
topicFromStb = 'topic_stb_mulit_tbl'
-
+
tdSql.execute("create topic %s as stable %s.stb" %(topicFromStb, dbName))
consumerId = 0
expectrowcnt = numOfCtb * rowsOfPerNtb
@@ -424,7 +424,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -445,7 +445,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase1(cfgPath, buildPath)
- self.tmqCase2(cfgPath, buildPath)
+ self.tmqCase2(cfgPath, buildPath)
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py b/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py
index 4dac872fde..fc4fdcecf9 100644
--- a/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py
+++ b/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py
@@ -38,20 +38,20 @@ class TDTestCase:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
-
+
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
-
+
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
@@ -84,7 +84,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replica)
tdLog.info("create stb")
@@ -101,13 +101,13 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.asyncInsertDataByInterlace(paraDict)
tmqCom.create_ntable(tdSql, dbname=paraDict["dbName"], tbname_prefix="ntb", tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=1)
- tmqCom.insert_rows_into_ntbl(tdSql, dbname=paraDict["dbName"], tbname_prefix="ntb", tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"], startTs=paraDict["startTs"], tblNum=1, rows=2) # tdLog.info("restart taosd to ensure that the data falls into the disk")
+ tmqCom.insert_rows_into_ntbl(tdSql, dbname=paraDict["dbName"], tbname_prefix="ntb", tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"], startTs=paraDict["startTs"], tblNum=1, rows=2) # tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("drop database %s"%paraDict["dbName"])
return
- def tmqCase1(self):
- tdLog.printNoPrefix("======== test case 1: ")
-
+ def tmqCase1(self):
+ tdLog.printNoPrefix("======== test case 1: ")
+
# create and start thread
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
@@ -132,14 +132,14 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicFromStb1
@@ -166,13 +166,13 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
-
+
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
tdSql.query("drop topic %s"%topicFromStb1)
diff --git a/tests/system-test/7-tmq/schema.py b/tests/system-test/7-tmq/schema.py
index 699c252c31..34d36e5792 100644
--- a/tests/system-test/7-tmq/schema.py
+++ b/tests/system-test/7-tmq/schema.py
@@ -56,12 +56,12 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("drop database if exists %s "%(cdbName))
tdSql.query("create database %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
- tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
+ tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
@@ -75,7 +75,7 @@ class TDTestCase:
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -90,11 +90,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -103,9 +103,9 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -135,7 +135,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -154,7 +154,7 @@ class TDTestCase:
ctbDict[i] = 0
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfCtb = 0
+ rowsOfCtb = 0
while rowsOfCtb < rowsPerTbl:
for i in range(ctbNum):
sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
@@ -181,7 +181,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(ctbPrefix,i)
for j in range(rowsPerTbl):
@@ -212,7 +212,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
for j in range(rowsPerTbl):
@@ -231,8 +231,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -265,7 +265,7 @@ class TDTestCase:
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
tdLog.info("create database, super table, child table, normal table")
ntbName = 'ntb1'
self.create_database(tdSql, parameterDict["dbName"])
@@ -278,10 +278,10 @@ class TDTestCase:
tdLog.info("create topics from super table and normal table")
columnTopicFromStb = 'column_topic_from_stb1'
columnTopicFromNtb = 'column_topic_from_ntb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
-
+
tdLog.info("======== super table test:")
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -341,12 +341,12 @@ class TDTestCase:
tdLog.info("======== child table test:")
parameterDict['stbName'] = 'stb12'
ctbName = 'stb12_0'
- tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s using %s.%s tags (1, '2', 3, '4', '5')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
tdLog.info("create topics from child table")
columnTopicFromCtb = 'column_topic_from_ctb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s" %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
@@ -388,7 +388,7 @@ class TDTestCase:
tdSql.query("alter table %s.%s add column c4 float"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t3 int"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t4 float"%(parameterDict['dbName'], parameterDict['stbName']))
-
+
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self, cfgPath, buildPath):
@@ -406,7 +406,7 @@ class TDTestCase:
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
ntbName = 'ntb2'
@@ -416,18 +416,18 @@ class TDTestCase:
tdLog.info("create topics from super table and normal table")
columnTopicFromStb = 'column_topic_from_stb2'
columnTopicFromNtb = 'column_topic_from_ntb2'
-
+
tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s where c3 > 3 and c4 like 'abc' and t3 = 5 and t4 = 'beijing'" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s where c3 > 3 and c4 like 'abc'" %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
-
+
tdLog.info("======== super table test:")
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -485,12 +485,12 @@ class TDTestCase:
tdLog.info("======== child table test:")
parameterDict['stbName'] = 'stb21'
ctbName = 'stb21_0'
- tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s using %s.%s tags (1, '2', 3, '4', '5')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
tdLog.info("create topics from child table")
columnTopicFromCtb = 'column_topic_from_ctb2'
-
+
tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s where c3 > 3 and c4 like 'abc' and t3 = 5 and t4 = 'beijing'" %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
@@ -536,11 +536,11 @@ class TDTestCase:
tdSql.query("alter table %s.%s add column c5 float"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t5 float"%(parameterDict['dbName'], parameterDict['stbName']))
-
+
tdLog.printNoPrefix("======== test case 2 end ...... ")
def tmqCase3(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 3: ")
+ tdLog.printNoPrefix("======== test case 3: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db3', \
@@ -554,7 +554,7 @@ class TDTestCase:
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
ntbName = 'ntb3'
@@ -564,19 +564,19 @@ class TDTestCase:
tdLog.info("create topics from super table and normal table")
columnTopicFromStb = 'star_topic_from_stb3'
columnTopicFromNtb = 'star_topic_from_ntb3'
-
+
tdSql.execute("create topic %s as select * from %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdSql.execute("create topic %s as select * from %s.%s " %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
-
+
tdLog.info("======== super table test:")
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop tag t5"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -627,12 +627,12 @@ class TDTestCase:
tdLog.info("======== child table test:")
parameterDict['stbName'] = 'stb31'
ctbName = 'stb31_0'
- tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s using %s.%s tags (10, '10', 10, '10', '10')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
tdLog.info("create topics from child table")
columnTopicFromCtb = 'column_topic_from_ctb3'
-
+
tdSql.execute("create topic %s as select * from %s.%s " %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -647,7 +647,7 @@ class TDTestCase:
tdSql.query("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t5='20'"%(parameterDict['dbName'], ctbName))
-
+
tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -662,7 +662,7 @@ class TDTestCase:
# alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
tdSql.query("alter table %s.%s add column c6 float"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName']))
-
+
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -679,7 +679,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 4: ")
+ tdLog.printNoPrefix("======== test case 4: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db4', \
@@ -695,7 +695,7 @@ class TDTestCase:
parameterDict['cfg'] = cfgPath
ctbName = 'stb4_0'
-
+
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
@@ -703,7 +703,7 @@ class TDTestCase:
tdLog.info("create topics from super table")
columnTopicFromStb = 'star_topic_from_stb4'
-
+
tdSql.execute("create topic %s as stable %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
tdLog.info("======== child table test:")
@@ -739,10 +739,10 @@ class TDTestCase:
tdSql.query("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t1new"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -750,7 +750,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 4 end ...... ")
def tmqCase5(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 5: ")
+ tdLog.printNoPrefix("======== test case 5: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db5', \
@@ -766,7 +766,7 @@ class TDTestCase:
parameterDict['cfg'] = cfgPath
ctbName = 'stb5_0'
-
+
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
@@ -774,7 +774,7 @@ class TDTestCase:
tdLog.info("create topics from super table")
columnTopicFromStb = 'star_topic_from_db5'
-
+
tdSql.execute("create topic %s as database %s" %(columnTopicFromStb, parameterDict['dbName']))
tdLog.info("======== child table test:")
@@ -810,10 +810,10 @@ class TDTestCase:
tdSql.query("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t1new"%(parameterDict['dbName'], parameterDict['stbName']))
- tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -821,7 +821,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 5 end ...... ")
def tmqCase6(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 6: ")
+ tdLog.printNoPrefix("======== test case 6: ")
parameterDict = {'cfg': '', \
'actionType': 0, \
'dbName': 'db6', \
@@ -835,18 +835,18 @@ class TDTestCase:
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
tdLog.info("create database, super table, child table, normal table")
self.create_database(tdSql, parameterDict["dbName"])
tdLog.info("======== child table test:")
parameterDict['stbName'] = 'stb6'
ctbName = 'stb6_0'
- tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
tdSql.query("create table %s.%s using %s.%s tags (10, '10', 10, '10', '10')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
tdLog.info("create topics from child table")
columnTopicFromCtb = 'column_topic_from_ctb6'
-
+
tdSql.execute("create topic %s as select c1, c2, c3 from %s.%s where t1 > 10 and t2 = 'beijign' and sin(t3) < 0" %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
tdSql.error("alter table %s.%s modify column c1 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -861,7 +861,7 @@ class TDTestCase:
tdSql.error("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName))
tdSql.query("alter table %s.%s set tag t5='20'"%(parameterDict['dbName'], ctbName))
-
+
tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -876,7 +876,7 @@ class TDTestCase:
# alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
tdSql.query("alter table %s.%s add column c6 float"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName']))
-
+
# alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -903,7 +903,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase1(cfgPath, buildPath)
- self.tmqCase2(cfgPath, buildPath)
+ self.tmqCase2(cfgPath, buildPath)
self.tmqCase3(cfgPath, buildPath)
self.tmqCase4(cfgPath, buildPath)
self.tmqCase5(cfgPath, buildPath)
diff --git a/tests/system-test/7-tmq/stbFilter.py b/tests/system-test/7-tmq/stbFilter.py
index 7ad3cc99e7..4942a39db4 100644
--- a/tests/system-test/7-tmq/stbFilter.py
+++ b/tests/system-test/7-tmq/stbFilter.py
@@ -79,27 +79,27 @@ class TDTestCase:
topicNameList = ['topic1', 'topic2', 'topic3']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 4 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
queryString = "select ts, log(c1), cos(c1) from %s.%s where c1 > 5000" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
queryString = "select ts, log(c1), atan(c1) from %s.%s where ts >= %d" %(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9000)
sqlString = "create topic %s as %s" %(topicNameList[2], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -115,10 +115,10 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
@@ -132,7 +132,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@@ -148,14 +148,14 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[2] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -193,7 +193,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
queryString = "select ts, sin(c1), pow(c2,3) from %s.%s where sin(c2) >= 0" %(paraDict['dbName'], paraDict['stbName'])
@@ -209,7 +209,7 @@ class TDTestCase:
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
-
+
# start tmq consume processor
tdLog.info("insert consume info to consume processor")
consumerId = 0
@@ -223,10 +223,10 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
@@ -240,7 +240,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@@ -256,14 +256,14 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[2] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
- # time.sleep(10)
+ # time.sleep(10)
# for i in range(len(topicNameList)):
# tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/stbTagFilter-1ctb.py b/tests/system-test/7-tmq/stbTagFilter-1ctb.py
index 003dd9a47d..6a26d2ce1f 100644
--- a/tests/system-test/7-tmq/stbTagFilter-1ctb.py
+++ b/tests/system-test/7-tmq/stbTagFilter-1ctb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1
self.rowsPerTbl = 10000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -65,11 +65,11 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
-
+
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
@@ -95,7 +95,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
# update to half tables
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
@@ -103,16 +103,16 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_UpperCase_stb1'
+ topicFromStb1 = 'topic_UpperCase_stb1'
# queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
+ tdSql.execute(sqlString)
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
@@ -139,18 +139,18 @@ class TDTestCase:
tdLog.info("run select sql from db")
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQuery))
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
-
- tmqCom.checkFileContent(consumerId, queryString)
+
+ tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -170,15 +170,15 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
-
+
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
# update to half tables
# paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@@ -187,17 +187,17 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_UpperCase_stb1'
+ topicFromStb1 = 'topic_UpperCase_stb1'
queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
# queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 1
@@ -213,13 +213,13 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -229,7 +229,7 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt:
@@ -237,8 +237,8 @@ class TDTestCase:
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
@@ -251,14 +251,14 @@ class TDTestCase:
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
-
+
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/stbTagFilter-multiCtb.py b/tests/system-test/7-tmq/stbTagFilter-multiCtb.py
index 1ea23fe376..9053bf2620 100644
--- a/tests/system-test/7-tmq/stbTagFilter-multiCtb.py
+++ b/tests/system-test/7-tmq/stbTagFilter-multiCtb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -65,11 +65,11 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
-
+
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
@@ -95,7 +95,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
# update to half tables
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
@@ -103,16 +103,16 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_UpperCase_stb1'
+ topicFromStb1 = 'topic_UpperCase_stb1'
queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
# queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
+ tdSql.execute(sqlString)
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
@@ -139,18 +139,18 @@ class TDTestCase:
tdLog.info("run select sql from db")
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQuery))
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -170,15 +170,15 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
-
+
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
# update to half tables
# paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
# paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@@ -187,17 +187,17 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_UpperCase_stb1'
+ topicFromStb1 = 'topic_UpperCase_stb1'
# queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 1
@@ -213,7 +213,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 7/10)
paraDict['ctbStartIdx'] = int(paraDict['ctbNum'] * 7/10)
# paraDict["rowsPerTbl"] = 100
@@ -221,7 +221,7 @@ class TDTestCase:
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -231,7 +231,7 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt / 2:
@@ -239,8 +239,8 @@ class TDTestCase:
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error when snapshot is 1!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
@@ -253,13 +253,13 @@ class TDTestCase:
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
-
+
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
- self.tmqCase2()
+ self.tmqCase2()
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/subscribeDb.py b/tests/system-test/7-tmq/subscribeDb.py
index b2c569e31e..ba46f72695 100644
--- a/tests/system-test/7-tmq/subscribeDb.py
+++ b/tests/system-test/7-tmq/subscribeDb.py
@@ -49,7 +49,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -58,7 +58,7 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -73,11 +73,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -85,20 +85,20 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum):
- tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
+ tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
@@ -111,8 +111,8 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
- event.set()
+
+ event.set()
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
@@ -141,7 +141,7 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
+
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
@@ -159,7 +159,7 @@ class TDTestCase:
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
- parameterDict["startTs"])
+ parameterDict["startTs"])
return
def tmqCase1(self, cfgPath, buildPath):
@@ -182,10 +182,10 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
-
+
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -197,7 +197,7 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
@@ -208,14 +208,14 @@ class TDTestCase:
# wait for data ready
prepareEnvThread.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -226,7 +226,7 @@ class TDTestCase:
self.initConsumerTable()
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -238,7 +238,7 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor")
pollDelay = 20
showMsg = 1
@@ -250,7 +250,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -279,12 +279,12 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
-
+
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
-
+
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
topicList = topicName1
@@ -298,25 +298,25 @@ class TDTestCase:
consumerId = 1
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
pollDelay = 20
showMsg = 1
- showRow = 1
+ showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
prepareEnvThread.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 2
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
if not (totalConsumeRows >= expectrowcnt):
tdLog.exit("tmq consume rows error!")
@@ -343,12 +343,12 @@ class TDTestCase:
tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))
tdSql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict['dbName'], parameterDict['stbName']))
-
+
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
-
+
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
topicList = topicName1
@@ -366,11 +366,11 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor")
pollDelay = 100
showMsg = 1
- showRow = 1
+ showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
@@ -378,14 +378,14 @@ class TDTestCase:
# wait for data ready
prepareEnvThread.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 2
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt * 2:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
tdLog.exit("tmq consume rows error!")
@@ -430,9 +430,9 @@ class TDTestCase:
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
-
+
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
topicList = topicName1
@@ -443,10 +443,10 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
# consumerId = 1
# self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
@@ -456,16 +456,16 @@ class TDTestCase:
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
- prepareEnvThread.join()
+ prepareEnvThread.join()
prepareEnvThread2.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -486,10 +486,10 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase1(cfgPath, buildPath)
- self.tmqCase2(cfgPath, buildPath)
- self.tmqCase2a(cfgPath, buildPath)
+ self.tmqCase2(cfgPath, buildPath)
+ self.tmqCase2a(cfgPath, buildPath)
self.tmqCase3(cfgPath, buildPath)
-
+
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/7-tmq/subscribeDb0.py b/tests/system-test/7-tmq/subscribeDb0.py
index 4e8fb04517..7720001fbb 100644
--- a/tests/system-test/7-tmq/subscribeDb0.py
+++ b/tests/system-test/7-tmq/subscribeDb0.py
@@ -49,7 +49,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -58,7 +58,7 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -73,11 +73,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -85,20 +85,20 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum):
- tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
+ tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
@@ -111,8 +111,8 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
- event.set()
+
+ event.set()
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
@@ -141,7 +141,7 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
+
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
@@ -159,7 +159,7 @@ class TDTestCase:
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
- parameterDict["startTs"])
+ parameterDict["startTs"])
return
def tmqCase4(self, cfgPath, buildPath):
@@ -198,9 +198,9 @@ class TDTestCase:
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
-
+
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
topicList = topicName1
@@ -211,10 +211,10 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
consumerId = 1
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
@@ -224,16 +224,16 @@ class TDTestCase:
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
- prepareEnvThread.join()
+ prepareEnvThread.join()
prepareEnvThread2.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 2
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -275,9 +275,9 @@ class TDTestCase:
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
-
+
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
topicList = topicName1
@@ -288,10 +288,10 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
consumerId = 1
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
@@ -304,16 +304,16 @@ class TDTestCase:
prepareEnvThread2.start()
# wait for data ready
- prepareEnvThread.join()
+ prepareEnvThread.join()
prepareEnvThread2.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 2
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows < expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
diff --git a/tests/system-test/7-tmq/subscribeDb1.py b/tests/system-test/7-tmq/subscribeDb1.py
index 28a341f8f3..404938158f 100644
--- a/tests/system-test/7-tmq/subscribeDb1.py
+++ b/tests/system-test/7-tmq/subscribeDb1.py
@@ -49,7 +49,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -58,7 +58,7 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -73,11 +73,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -85,20 +85,20 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum):
- tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
+ tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
@@ -111,8 +111,8 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
- event.set()
+
+ event.set()
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
@@ -141,7 +141,7 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
+
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
@@ -159,7 +159,7 @@ class TDTestCase:
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
- parameterDict["startTs"])
+ parameterDict["startTs"])
return
def tmqCase6(self, cfgPath, buildPath):
@@ -201,10 +201,10 @@ class TDTestCase:
tdLog.info("create topics from db")
topicName1 = 'topic_db60'
topicName2 = 'topic_db61'
-
- tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
+
+ tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName']))
-
+
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
topicList = topicName1 + ',' + topicName2
@@ -215,10 +215,10 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
#consumerId = 1
#self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
@@ -228,16 +228,16 @@ class TDTestCase:
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
- prepareEnvThread.join()
+ prepareEnvThread.join()
prepareEnvThread2.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -286,10 +286,10 @@ class TDTestCase:
tdLog.info("create topics from db")
topicName1 = 'topic_db60'
topicName2 = 'topic_db61'
-
- tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
+
+ tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName']))
-
+
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
topicList = topicName1 + ',' + topicName2
@@ -300,10 +300,10 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
consumerId = 1
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
@@ -313,16 +313,16 @@ class TDTestCase:
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
# wait for data ready
- prepareEnvThread.join()
+ prepareEnvThread.join()
prepareEnvThread2.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 2
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
diff --git a/tests/system-test/7-tmq/subscribeDb2.py b/tests/system-test/7-tmq/subscribeDb2.py
index 78aaa2634c..4702aef035 100644
--- a/tests/system-test/7-tmq/subscribeDb2.py
+++ b/tests/system-test/7-tmq/subscribeDb2.py
@@ -50,7 +50,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -59,7 +59,7 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -73,12 +73,12 @@ class TDTestCase:
if tdSql.getRows() == expectRows:
break
else:
- time.sleep(5)
+ time.sleep(5)
for i in range(expectRows):
tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -89,17 +89,17 @@ class TDTestCase:
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
- tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
+ tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
@@ -112,8 +112,8 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
- event.set()
+
+ event.set()
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
@@ -146,7 +146,7 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
+
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
@@ -165,7 +165,7 @@ class TDTestCase:
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
- parameterDict["startTs"])
+ parameterDict["startTs"])
return
def tmqCase8(self, cfgPath, buildPath):
@@ -188,10 +188,10 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
-
+
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = math.ceil(parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2)
@@ -203,7 +203,7 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
@@ -214,19 +214,19 @@ class TDTestCase:
# wait for data ready
prepareEnvThread.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if not (totalConsumeRows >= expectrowcnt):
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
-
+
tdLog.info("again start consume processer")
self.initConsumerTable()
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -237,7 +237,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -266,10 +266,10 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
-
+
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = math.ceil(parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2)
@@ -281,7 +281,7 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
@@ -292,14 +292,14 @@ class TDTestCase:
# wait for data ready
prepareEnvThread.join()
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
tdSql.query("select count(*) from %s.%s" %(parameterDict['dbName'], parameterDict['stbName']))
countOfStb = tdSql.getData(0,0)
print ("====total rows of stb: %d"%countOfStb)
@@ -307,7 +307,7 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
if totalConsumeRows < expectrowcnt:
tdLog.exit("tmq consume rows error!")
-
+
tdLog.info("again start consume processer")
self.initConsumerTable()
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -340,7 +340,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase8(cfgPath, buildPath)
- self.tmqCase9(cfgPath, buildPath)
+ self.tmqCase9(cfgPath, buildPath)
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/subscribeDb3.py b/tests/system-test/7-tmq/subscribeDb3.py
index b576a0ea70..e8e475456c 100644
--- a/tests/system-test/7-tmq/subscribeDb3.py
+++ b/tests/system-test/7-tmq/subscribeDb3.py
@@ -49,18 +49,18 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
- tdSql.query("drop table if exists %s.notifyinfo "%(cdbName))
+ tdSql.query("drop table if exists %s.notifyinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
tdSql.query("create table %s.notifyinfo (ts timestamp, cmdid int, consumerid int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -75,7 +75,7 @@ class TDTestCase:
else:
time.sleep(0.1)
return
-
+
def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'):
while 1:
tdSql.query("select * from %s.notifyinfo"%cdbName)
@@ -95,12 +95,12 @@ class TDTestCase:
if tdSql.getRows() == expectRows:
break
else:
- time.sleep(1)
+ time.sleep(1)
for i in range(expectRows):
tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -111,17 +111,17 @@ class TDTestCase:
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
- tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
+ tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
tsql.execute("use %s" %dbName)
tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
pre_create = "create table"
@@ -134,8 +134,8 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
- event.set()
+
+ event.set()
tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
return
@@ -164,7 +164,7 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
+
def prepareEnv(self, **parameterDict):
print ("input parameters:")
print (parameterDict)
@@ -183,7 +183,7 @@ class TDTestCase:
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"],\
- parameterDict["startTs"])
+ parameterDict["startTs"])
return
def tmqCase10(self, cfgPath, buildPath):
@@ -206,10 +206,10 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
-
+
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -221,7 +221,7 @@ class TDTestCase:
auto.commit.interval.ms:6000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
@@ -242,18 +242,18 @@ class TDTestCase:
resultList = self.selectConsumeResult(expectRows)
# wait for data ready
- prepareEnvThread.join()
+ prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
tdLog.info("again start consume processer")
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
-
+
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -283,10 +283,10 @@ class TDTestCase:
prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
prepareEnvThread.start()
-
+
tdLog.info("create topics from db")
topicName1 = 'topic_db1'
-
+
tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -298,7 +298,7 @@ class TDTestCase:
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
event.wait()
tdLog.info("start consume processor")
@@ -307,7 +307,7 @@ class TDTestCase:
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
- # time.sleep(6)
+ # time.sleep(6)
tdLog.info("start to wait commit notify")
self.getStartCommitNotifyFromTmqsim()
@@ -320,18 +320,18 @@ class TDTestCase:
# resultList = self.selectConsumeResult(expectRows)
# wait for data ready
- prepareEnvThread.join()
+ prepareEnvThread.join()
tdLog.info("insert process end, and start to check consume result")
tdLog.info("again start consume processer")
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
-
+
expectRows = 1
resultList = self.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows >= expectrowcnt or totalConsumeRows <= 0:
tdLog.info("act consume rows: %d, expect consume rows between %d and 0"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
diff --git a/tests/system-test/7-tmq/subscribeDb4.py b/tests/system-test/7-tmq/subscribeDb4.py
index 145cbbbbf5..63a59c0dc3 100644
--- a/tests/system-test/7-tmq/subscribeDb4.py
+++ b/tests/system-test/7-tmq/subscribeDb4.py
@@ -47,7 +47,7 @@ class TDTestCase:
pollDelay = 20
showMsg = 1
- showRow = 1
+ showRow = 1
hostname = socket.gethostname()
@@ -59,7 +59,7 @@ class TDTestCase:
def tmqCase12(self):
tdLog.printNoPrefix("======== test case 12: ")
tdLog.info("step 1: create database, stb, ctb and insert data")
-
+
tmqCom.initConsumerTable(self.cdbName)
tdCom.create_database(tdSql,self.paraDict["dbName"],self.paraDict["dropFlag"])
@@ -76,20 +76,20 @@ class TDTestCase:
tmqCom.insert_data_2(tdSql,self.paraDict["dbName"],self.paraDict["ctbPrefix"],self.paraDict["ctbNum"],self.paraDict["rowsPerTbl"],self.paraDict["batchNum"],self.paraDict["startTs"],self.paraDict["ctbStartIdx"])
tdLog.info("create topics from db")
- topicName1 = 'topic_%s'%(self.paraDict['dbName'])
+ topicName1 = 'topic_%s'%(self.paraDict['dbName'])
tdSql.execute("create topic %s as database %s" %(topicName1, self.paraDict['dbName']))
-
+
topicList = topicName1
keyList = '%s,%s,%s,%s'%(self.groupId,self.autoCommit,self.autoCommitInterval,self.autoOffset)
self.expectrowcnt = self.paraDict["rowsPerTbl"] * self.paraDict["ctbNum"] * 2
tmqCom.insertConsumerInfo(self.consumerId, self.expectrowcnt,topicList,keyList,self.ifcheckdata,self.ifManualCommit)
-
- tdLog.info("start consume processor")
+
+ tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(self.pollDelay,self.paraDict["dbName"],self.showMsg, self.showRow,self.cdbName)
tdLog.info("After waiting for a period of time, drop one stable")
- time.sleep(3)
- tdSql.execute("drop table %s.%s" %(self.paraDict['dbName'], self.paraDict['stbName']))
+ time.sleep(3)
+ tdSql.execute("drop table %s.%s" %(self.paraDict['dbName'], self.paraDict['stbName']))
tdLog.info("wait result from consumer, then check it")
expectRows = 1
@@ -98,7 +98,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if not (totalConsumeRows >= self.expectrowcnt/2 and totalConsumeRows <= self.expectrowcnt):
tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, self.expectrowcnt/2, self.expectrowcnt))
tdLog.exit("tmq consume rows error!")
diff --git a/tests/system-test/7-tmq/subscribeStb.py b/tests/system-test/7-tmq/subscribeStb.py
index 4f70340b5a..2757e590a3 100644
--- a/tests/system-test/7-tmq/subscribeStb.py
+++ b/tests/system-test/7-tmq/subscribeStb.py
@@ -56,7 +56,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -65,12 +65,12 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -85,11 +85,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -97,14 +97,14 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -134,7 +134,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -149,7 +149,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
@@ -168,8 +168,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -188,8 +188,8 @@ class TDTestCase:
return
def tmqCase1(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 1: ")
-
+ tdLog.printNoPrefix("======== test case 1: ")
+
self.initConsumerTable()
# create and start thread
@@ -205,13 +205,13 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -245,7 +245,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -255,8 +255,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 2: ")
-
+ tdLog.printNoPrefix("======== test case 2: ")
+
self.initConsumerTable()
# create and start thread
@@ -292,7 +292,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -341,7 +341,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -362,7 +362,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase1(cfgPath, buildPath)
- self.tmqCase2(cfgPath, buildPath)
+ self.tmqCase2(cfgPath, buildPath)
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/subscribeStb0.py b/tests/system-test/7-tmq/subscribeStb0.py
index 65eaab897d..26e834ae2c 100644
--- a/tests/system-test/7-tmq/subscribeStb0.py
+++ b/tests/system-test/7-tmq/subscribeStb0.py
@@ -56,7 +56,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -65,12 +65,12 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -85,11 +85,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -97,14 +97,14 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -134,7 +134,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -149,7 +149,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
@@ -168,8 +168,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -188,8 +188,8 @@ class TDTestCase:
return
def tmqCase3(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 3: ")
-
+ tdLog.printNoPrefix("======== test case 3: ")
+
self.initConsumerTable()
# create and start thread
@@ -213,7 +213,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -231,7 +231,7 @@ class TDTestCase:
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
-
+
time.sleep(1.5)
tdLog.info("drop som child table of stb1")
dropTblNum = 4
@@ -246,9 +246,9 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum)
-
+
tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt))
if not (totalConsumeRows <= expectrowcnt and totalConsumeRows >= remaindrowcnt):
tdLog.exit("tmq consume rows error!")
@@ -259,7 +259,7 @@ class TDTestCase:
def tmqCase4(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 4: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -275,7 +275,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -288,7 +288,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -313,7 +313,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -331,7 +331,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -342,7 +342,7 @@ class TDTestCase:
def tmqCase5(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 5: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -358,7 +358,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -371,7 +371,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -396,7 +396,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -414,14 +414,14 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != (expectrowcnt * (1 + 1/4)):
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
- tdLog.printNoPrefix("======== test case 5 end ...... ")
+ tdLog.printNoPrefix("======== test case 5 end ...... ")
def run(self):
tdSql.prepare()
diff --git a/tests/system-test/7-tmq/subscribeStb1.py b/tests/system-test/7-tmq/subscribeStb1.py
index 90d77dba0d..0c49636b15 100644
--- a/tests/system-test/7-tmq/subscribeStb1.py
+++ b/tests/system-test/7-tmq/subscribeStb1.py
@@ -56,7 +56,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -65,12 +65,12 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -85,11 +85,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -97,14 +97,14 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -134,7 +134,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -149,7 +149,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
@@ -168,8 +168,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -189,7 +189,7 @@ class TDTestCase:
def tmqCase6(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 6: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -205,7 +205,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -218,7 +218,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -243,7 +243,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -265,7 +265,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -276,7 +276,7 @@ class TDTestCase:
def tmqCase7(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 7: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -292,7 +292,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -305,7 +305,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -330,7 +330,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -348,7 +348,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
diff --git a/tests/system-test/7-tmq/subscribeStb2.py b/tests/system-test/7-tmq/subscribeStb2.py
index 74caa139f1..2cbb16ec00 100644
--- a/tests/system-test/7-tmq/subscribeStb2.py
+++ b/tests/system-test/7-tmq/subscribeStb2.py
@@ -56,7 +56,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -65,12 +65,12 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -85,11 +85,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -97,14 +97,14 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -134,7 +134,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -149,7 +149,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
@@ -168,8 +168,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -188,8 +188,8 @@ class TDTestCase:
return
def tmqCase8(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 8: ")
-
+ tdLog.printNoPrefix("======== test case 8: ")
+
self.initConsumerTable()
# create and start thread
@@ -218,7 +218,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -243,7 +243,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -263,7 +263,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -283,7 +283,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt*2:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
tdLog.exit("tmq consume rows error!")
@@ -293,8 +293,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 8 end ...... ")
def tmqCase9(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 9: ")
-
+ tdLog.printNoPrefix("======== test case 9: ")
+
self.initConsumerTable()
# create and start thread
@@ -323,7 +323,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -348,7 +348,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -372,7 +372,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -392,7 +392,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt*2:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
tdLog.exit("tmq consume rows error!")
diff --git a/tests/system-test/7-tmq/subscribeStb3.py b/tests/system-test/7-tmq/subscribeStb3.py
index e6eaa17564..9c1b3fd241 100644
--- a/tests/system-test/7-tmq/subscribeStb3.py
+++ b/tests/system-test/7-tmq/subscribeStb3.py
@@ -56,7 +56,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -65,12 +65,12 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -85,11 +85,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -97,14 +97,14 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -134,7 +134,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -149,7 +149,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
@@ -168,8 +168,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -188,8 +188,8 @@ class TDTestCase:
return
def tmqCase10(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 10: ")
-
+ tdLog.printNoPrefix("======== test case 10: ")
+
self.initConsumerTable()
# create and start thread
@@ -218,7 +218,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -243,7 +243,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -267,7 +267,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt-10000:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000))
tdLog.exit("tmq consume rows error!")
@@ -291,7 +291,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt*2:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
tdLog.exit("tmq consume rows error!")
@@ -302,7 +302,7 @@ class TDTestCase:
def tmqCase11(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 11: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -318,7 +318,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -331,7 +331,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -356,7 +356,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -378,7 +378,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -389,7 +389,7 @@ class TDTestCase:
def tmqCase12(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 12: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -405,7 +405,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -418,7 +418,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -443,7 +443,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -465,7 +465,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -476,7 +476,7 @@ class TDTestCase:
def tmqCase13(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 13: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -492,7 +492,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -505,7 +505,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -530,7 +530,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -553,7 +553,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt*(1/2+1/4):
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4)))
tdLog.exit("tmq consume rows error!")
@@ -576,7 +576,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -596,7 +596,7 @@ class TDTestCase:
cfgPath = buildPath + "/../sim/psim/cfg"
tdLog.info("cfgPath: %s" % cfgPath)
- self.tmqCase10(cfgPath, buildPath)
+ self.tmqCase10(cfgPath, buildPath)
self.tmqCase11(cfgPath, buildPath)
self.tmqCase12(cfgPath, buildPath)
self.tmqCase13(cfgPath, buildPath)
diff --git a/tests/system-test/7-tmq/subscribeStb4.py b/tests/system-test/7-tmq/subscribeStb4.py
index f3982c8f1f..33f4c4af1a 100644
--- a/tests/system-test/7-tmq/subscribeStb4.py
+++ b/tests/system-test/7-tmq/subscribeStb4.py
@@ -56,7 +56,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -65,12 +65,12 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -85,11 +85,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -97,14 +97,14 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -134,7 +134,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -149,7 +149,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
@@ -168,8 +168,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -188,8 +188,8 @@ class TDTestCase:
return
def tmqCase1(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 1: ")
-
+ tdLog.printNoPrefix("======== test case 1: ")
+
self.initConsumerTable()
auotCtbNum = 5
@@ -208,7 +208,7 @@ class TDTestCase:
'batchNum': 100,
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -216,7 +216,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * (auotCtbNum + parameterDict["ctbNum"])
@@ -248,7 +248,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -258,8 +258,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 2: ")
-
+ tdLog.printNoPrefix("======== test case 2: ")
+
self.initConsumerTable()
auotCtbNum = 10
@@ -278,7 +278,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -322,7 +322,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -343,7 +343,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase1(cfgPath, buildPath)
- self.tmqCase2(cfgPath, buildPath)
+ self.tmqCase2(cfgPath, buildPath)
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmq3mnodeSwitch.py b/tests/system-test/7-tmq/tmq3mnodeSwitch.py
index 2769c3867b..305a93128e 100644
--- a/tests/system-test/7-tmq/tmq3mnodeSwitch.py
+++ b/tests/system-test/7-tmq/tmq3mnodeSwitch.py
@@ -36,7 +36,7 @@ class TDTestCase:
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
-
+
def checkDnodesStatusAndCreateMnode(self,dnodeNumbers):
count=0
while count < dnodeNumbers:
@@ -44,7 +44,7 @@ class TDTestCase:
# tdLog.debug(tdSql.queryResult)
dCnt = 0
for i in range(dnodeNumbers):
- if tdSql.queryResult[i][self.dnodeStatusIndex] != "ready":
+ if tdSql.queryResult[i][self.dnodeStatusIndex] != "ready":
break
else:
dCnt += 1
@@ -64,7 +64,7 @@ class TDTestCase:
while count < self.mnodeCheckCnt:
time.sleep(1)
tdSql.query("show mnodes;")
- if tdSql.checkRows(self.mnodes) :
+ if tdSql.checkRows(self.mnodes) :
tdLog.debug("mnode is three nodes")
else:
tdLog.exit("mnode number is correct")
@@ -78,17 +78,17 @@ class TDTestCase:
break
elif roleOfMnode0=='follower' and roleOfMnode1=='leader' and roleOfMnode2 == 'follower' :
self.dnodeOfLeader = tdSql.queryResult[1][self.idIndex]
- break
+ break
elif roleOfMnode0=='follower' and roleOfMnode1=='follower' and roleOfMnode2 == 'leader' :
self.dnodeOfLeader = tdSql.queryResult[2][self.idIndex]
- break
- else:
+ break
+ else:
count+=1
else:
tdLog.exit("three mnodes is not ready in 10s ")
- tdSql.query("show mnodes;")
- tdSql.checkRows(self.mnodes)
+ tdSql.query("show mnodes;")
+ tdSql.checkRows(self.mnodes)
tdSql.checkData(0,self.mnodeEpIndex,'%s:%d'%(self.host,self.startPort))
tdSql.checkData(0,self.mnodeStatusIndex,'ready')
tdSql.checkData(1,self.mnodeEpIndex,'%s:%d'%(self.host,self.startPort+self.portStep))
@@ -101,8 +101,8 @@ class TDTestCase:
while count < self.mnodeCheckCnt:
time.sleep(1)
tdSql.query("show mnodes")
- tdLog.debug(tdSql.queryResult)
- # if tdSql.checkRows(self.mnodes) :
+ tdLog.debug(tdSql.queryResult)
+ # if tdSql.checkRows(self.mnodes) :
# tdLog.debug("mnode is three nodes")
# else:
# tdLog.exit("mnode number is correct")
@@ -117,21 +117,21 @@ class TDTestCase:
break
elif roleOfMnode1=='follower' and roleOfMnode2 == 'leader' :
self.dnodeOfLeader = tdSql.queryResult[2][self.idIndex]
- break
+ break
elif roleOfMnode1=='offline' :
if roleOfMnode0=='leader' and roleOfMnode2 == 'follower' :
self.dnodeOfLeader = tdSql.queryResult[0][self.idIndex]
break
elif roleOfMnode0=='follower' and roleOfMnode2 == 'leader' :
self.dnodeOfLeader = tdSql.queryResult[2][self.idIndex]
- break
+ break
elif roleOfMnode2=='offline' :
if roleOfMnode0=='leader' and roleOfMnode1 == 'follower' :
self.dnodeOfLeader = tdSql.queryResult[0][self.idIndex]
break
elif roleOfMnode0=='follower' and roleOfMnode1 == 'leader' :
self.dnodeOfLeader = tdSql.queryResult[1][self.idIndex]
- break
+ break
count+=1
else:
@@ -144,27 +144,27 @@ class TDTestCase:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
-
+
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
-
+
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
- return
-
+ return
+
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db1',
@@ -195,7 +195,7 @@ class TDTestCase:
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("async insert data")
pThread = tmqCom.asyncInsertData(paraDict)
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
@@ -234,22 +234,22 @@ class TDTestCase:
tdDnodes[1].stoptaosd()
time.sleep(10)
self.check3mnode1off()
-
- tdLog.info("switch end and wait insert data end ................")
- pThread.join()
- tdLog.info("check the consume result")
- tdSql.query(queryString)
+ tdLog.info("switch end and wait insert data end ................")
+ pThread.join()
+
+ tdLog.info("check the consume result")
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
- self.checkFileContent(consumerId, queryString)
+ self.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
diff --git a/tests/system-test/7-tmq/tmqAlterSchema.py b/tests/system-test/7-tmq/tmqAlterSchema.py
index a2e20990d9..232a1e11fa 100644
--- a/tests/system-test/7-tmq/tmqAlterSchema.py
+++ b/tests/system-test/7-tmq/tmqAlterSchema.py
@@ -36,7 +36,7 @@ class TDTestCase:
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
-
+
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: topic: select * from stb, while consume, add column int-A/bianry-B/float-C, and then modify B, drop C")
tdLog.printNoPrefix("add tag int-A/bianry-B/float-C, and then rename A, modify B, drop C, set t2")
@@ -61,7 +61,7 @@ class TDTestCase:
topicNameList = ['topic1']
expectRowsList = []
- queryStringList = []
+ queryStringList = []
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
tdLog.info("create stb")
@@ -71,15 +71,15 @@ class TDTestCase:
# tdLog.info("async insert data")
# pThread = tmqCom.asyncInsertData(paraDict)
tmqCom.insert_data_2(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"],paraDict["ctbStartIdx"])
-
+
tdLog.info("create topics from stb with filter")
queryStringList.append("select * from %s.%s" %(paraDict['dbName'], paraDict['stbName']))
sqlString = "create topic %s as %s" %(topicNameList[0], queryStringList[0])
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryStringList[0])
- expectRowsList.append(tdSql.getRows())
-
+ tdSql.query(queryStringList[0])
+ expectRowsList.append(tdSql.getRows())
+
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
@@ -91,14 +91,14 @@ class TDTestCase:
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
dstFile = tmqCom.getResultFileByTaosShell(consumerId, queryStringList[0])
-
+
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the notify info of start consume, then alter schema")
tmqCom.getStartConsumeNotifyFromTmqsim()
-
- # add column double-A/bianry-B/double-C, and then modify B, drop C
+
+ # add column double-A/bianry-B/double-C, and then modify B, drop C
sqlString = "alter table %s.%s add column newc1 double"%(paraDict["dbName"],paraDict['stbName'])
tdSql.execute(sqlString)
sqlString = "alter table %s.%s add column newc2 binary(16)"%(paraDict["dbName"],paraDict['stbName'])
@@ -108,8 +108,8 @@ class TDTestCase:
sqlString = "alter table %s.%s modify column newc2 binary(32)"%(paraDict["dbName"],paraDict['stbName'])
tdSql.execute(sqlString)
sqlString = "alter table %s.%s drop column newc3"%(paraDict["dbName"],paraDict['stbName'])
- tdSql.execute(sqlString)
- # add tag double-A/bianry-B/double-C, and then rename A, modify B, drop C, set t1
+ tdSql.execute(sqlString)
+ # add tag double-A/bianry-B/double-C, and then rename A, modify B, drop C, set t1
sqlString = "alter table %s.%s add tag newt1 double"%(paraDict["dbName"],paraDict['stbName'])
tdSql.execute(sqlString)
sqlString = "alter table %s.%s add tag newt2 binary(16)"%(paraDict["dbName"],paraDict['stbName'])
@@ -125,27 +125,27 @@ class TDTestCase:
sqlString = "alter table %s.%s0 set tag newt2='new tag'"%(paraDict["dbName"],paraDict['ctbPrefix'])
tdSql.execute(sqlString)
- tdLog.info("check the consume result")
- tdSql.query(queryStringList[0])
+ tdLog.info("check the consume result")
+ tdSql.query(queryStringList[0])
expectRowsList.append(tdSql.getRows())
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
tdLog.info("expect consume rows: %d"%(expectRowsList[0]))
tdLog.info("act consume rows: %d"%(resultList[0]))
-
+
if expectRowsList[0] != resultList[0]:
tdLog.exit("0 tmq consume rows error!")
tmqCom.checkTmqConsumeFileContent(consumerId, dstFile)
-
+
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
-
+
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: topic: select * from ntb, while consume, add column int-A/bianry-B/float-C, and then rename A, modify B, drop C")
paraDict = {'dbName': 'db1',
@@ -166,12 +166,12 @@ class TDTestCase:
'pollDelay': 10,
'showMsg': 1,
'showRow': 1}
-
- ntbName = 'ntb'
+
+ ntbName = 'ntb'
topicNameList = ['topic1']
expectRowsList = []
- queryStringList = []
+ queryStringList = []
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
tdLog.info("create stb")
@@ -182,15 +182,15 @@ class TDTestCase:
# pThread = tmqCom.asyncInsertData(paraDict)
tdCom.insert_rows(tdSql, dbname=paraDict["dbName"], tbname=ntbName, column_ele_list=paraDict['colSchema'], start_ts_value=paraDict["startTs"], count=paraDict["rowsPerTbl"])
tdLog.info("insert data end")
-
+
tdLog.info("create topics from ntb with filter")
queryStringList.append("select * from %s.%s" %(paraDict['dbName'], ntbName))
sqlString = "create topic %s as %s" %(topicNameList[0], queryStringList[0])
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryStringList[0])
- expectRowsList.append(tdSql.getRows())
-
+ tdSql.query(queryStringList[0])
+ expectRowsList.append(tdSql.getRows())
+
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
@@ -202,13 +202,13 @@ class TDTestCase:
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
dstFile = tmqCom.getResultFileByTaosShell(consumerId, queryStringList[0])
-
+
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the notify info of start consume, then alter schema")
tmqCom.getStartConsumeNotifyFromTmqsim()
-
+
# add column double-A/bianry-B/double-C, and then rename A, modify B, drop C
sqlString = "alter table %s.%s add column newc1 double"%(paraDict["dbName"],ntbName)
tdSql.execute(sqlString)
@@ -223,21 +223,21 @@ class TDTestCase:
sqlString = "alter table %s.%s drop column newc3"%(paraDict["dbName"],ntbName)
tdSql.execute(sqlString)
- tdLog.info("check the consume result")
- tdSql.query(queryStringList[0])
+ tdLog.info("check the consume result")
+ tdSql.query(queryStringList[0])
expectRowsList.append(tdSql.getRows())
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
tdLog.info("expect consume rows: %d"%(expectRowsList[0]))
tdLog.info("act consume rows: %d"%(resultList[0]))
-
+
if expectRowsList[0] != resultList[0]:
tdLog.exit("0 tmq consume rows error!")
tmqCom.checkTmqConsumeFileContent(consumerId, dstFile)
-
+
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqAutoCreateTbl.py b/tests/system-test/7-tmq/tmqAutoCreateTbl.py
index 277fdf7afb..a613f11267 100644
--- a/tests/system-test/7-tmq/tmqAutoCreateTbl.py
+++ b/tests/system-test/7-tmq/tmqAutoCreateTbl.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 500
self.rowsPerTbl = 1000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,10 +62,10 @@ class TDTestCase:
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctbx",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
@@ -96,7 +96,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
# tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
# tdLog.info("create stb")
@@ -105,12 +105,12 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
+ tdSql.execute(sqlString)
+
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicFromStb1
@@ -134,9 +134,9 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery))
- if totalConsumeRows != totalRowsFromQuery:
+ if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
@@ -144,7 +144,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -169,7 +169,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
# tdLog.info("create stb")
@@ -182,13 +182,13 @@ class TDTestCase:
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
# queryString = "select ts, c1, c2 from %s.%s "%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' and t5 == 'shanghai' "%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
consumerId = 1
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicFromStb1
@@ -205,7 +205,7 @@ class TDTestCase:
tdLog.info("create some new child table and insert data ")
tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctby",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -215,9 +215,9 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery))
- if totalConsumeRows != totalRowsFromQuery:
+ if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
@@ -231,14 +231,14 @@ class TDTestCase:
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
-
+
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmqCheckData.py b/tests/system-test/7-tmq/tmqCheckData.py
index 0e55dfa19d..9338debfa6 100644
--- a/tests/system-test/7-tmq/tmqCheckData.py
+++ b/tests/system-test/7-tmq/tmqCheckData.py
@@ -27,26 +27,26 @@ class TDTestCase:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
-
+
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
-
+
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
- return
+ return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
@@ -78,13 +78,13 @@ class TDTestCase:
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("insert data")
tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -100,15 +100,15 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
- self.checkFileContent(consumerId, queryString)
+ self.checkFileContent(consumerId, queryString)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
@@ -117,7 +117,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 1
@@ -127,7 +127,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@@ -143,8 +143,8 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[2], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
- expectRowsList.append(tdSql.getRows())
+ tdSql.query(queryString)
+ expectRowsList.append(tdSql.getRows())
consumerId = 2
topicList = topicNameList[2]
@@ -153,7 +153,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
# if expectRowsList[2] != resultList[0]:
@@ -162,7 +162,7 @@ class TDTestCase:
# self.checkFileContent(consumerId, queryString)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqCheckData1.py b/tests/system-test/7-tmq/tmqCheckData1.py
index 6cf849d1b9..7c236bbe8b 100644
--- a/tests/system-test/7-tmq/tmqCheckData1.py
+++ b/tests/system-test/7-tmq/tmqCheckData1.py
@@ -27,26 +27,26 @@ class TDTestCase:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
-
+
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
-
+
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
- return
+ return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
@@ -78,13 +78,13 @@ class TDTestCase:
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("insert data")
tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,c2 from %s.%s" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as stable %s.%s" %(topicNameList[0], paraDict["dbName"],paraDict["stbName"])
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -100,15 +100,15 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
- self.checkFileContent(consumerId, queryString)
+ self.checkFileContent(consumerId, queryString)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
@@ -116,7 +116,7 @@ class TDTestCase:
sqlString = "create topic %s as database %s" %(topicNameList[1], paraDict['dbName'])
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 1
@@ -126,7 +126,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@@ -141,7 +141,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[2], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 2
@@ -151,7 +151,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[2] != resultList[0]:
@@ -160,7 +160,7 @@ class TDTestCase:
self.checkFileContent(consumerId, queryString)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqCommon.py b/tests/system-test/7-tmq/tmqCommon.py
index 20cffed486..b1455ebe48 100644
--- a/tests/system-test/7-tmq/tmqCommon.py
+++ b/tests/system-test/7-tmq/tmqCommon.py
@@ -41,23 +41,23 @@ class TMQCom:
tdSql.init(conn.cursor())
# tdSql.init(conn.cursor(), logSql) # output sql.txt file
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
- tdSql.query("drop table if exists %s.notifyinfo "%(cdbName))
+ tdSql.query("drop table if exists %s.notifyinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
tdSql.query("create table %s.notifyinfo (ts timestamp, cmdid int, consumerid int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -72,13 +72,13 @@ class TMQCom:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
-
+
def selectConsumeMsgResult(self,expectRows,cdbName='cdb'):
resultList=[]
while 1:
@@ -88,11 +88,11 @@ class TMQCom:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 2))
-
+
return resultList
def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0,alias=0,snapshot=0):
@@ -102,7 +102,7 @@ class TMQCom:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
processorName = buildPath + '\\build\\bin\\tmq_sim.exe'
if alias != 0:
@@ -111,8 +111,8 @@ class TMQCom:
os.system(shellCmd)
processorName = processorNameNew
shellCmd = 'mintty -h never ' + processorName + ' -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s -e %d "%(pollDelay, dbName, showMsg, showRow, cdbName, snapshot)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s -e %d "%(pollDelay, dbName, showMsg, showRow, cdbName, snapshot)
+ shellCmd += "> nul 2>&1 &"
else:
processorName = buildPath + '/build/bin/tmq_sim'
if alias != 0:
@@ -121,10 +121,10 @@ class TMQCom:
os.system(shellCmd)
processorName = processorNameNew
shellCmd = 'nohup ' + processorName + ' -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s -e %d "%(pollDelay, dbName, showMsg, showRow, cdbName, snapshot)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s -e %d "%(pollDelay, dbName, showMsg, showRow, cdbName, snapshot)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
- os.system(shellCmd)
+ os.system(shellCmd)
def stopTmqSimProcess(self, processorName):
psCmd = "ps -ef|grep -w %s|grep -v grep | awk '{print $2}'"%(processorName)
@@ -149,7 +149,7 @@ class TMQCom:
for i in range(actRows):
if tdSql.getData(i, 1) == 0:
loopFlag = 0
- break
+ break
time.sleep(0.1)
return
@@ -163,7 +163,7 @@ class TMQCom:
for i in range(actRows):
if tdSql.getData(i, 1) == 1:
loopFlag = 0
- break
+ break
time.sleep(0.1)
return
@@ -196,7 +196,7 @@ class TMQCom:
tagBinaryValue = 'shanghai'
elif (i % 3 == 0):
tagBinaryValue = 'changsha'
-
+
sql += " %s.%s%d using %s.%s tags(%d, %d, %d, '%s', '%s')"%(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,tagBinaryValue,tagBinaryValue)
tblBatched += 1
if (i == ctbNum-1 ) or (tblBatched == batchNum):
@@ -206,9 +206,9 @@ class TMQCom:
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables by %s.%s" %(ctbNum, dbName, stbName))
- return
+ return
def drop_ctable(self, tsql, dbname=None, count=1, default_ctbname_prefix="ctb",ctbStartIdx=0):
for _ in range(count):
@@ -246,7 +246,7 @@ class TMQCom:
#print("insert sql:%s"%sql)
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
- return
+ return
# schema: (ts timestamp, c1 int, c2 int, c3 binary(16))
def insert_data_1(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs):
@@ -373,16 +373,16 @@ class TMQCom:
if startTs == 0:
t = time.time()
startTs = int(round(t * 1000))
-
+
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsBatched = 0
+ rowsBatched = 0
for i in range(ctbNum):
tagBinaryValue = 'beijing'
if (i % 2 == 0):
tagBinaryValue = 'shanghai'
elif (i % 3 == 0):
tagBinaryValue = 'changsha'
-
+
sql += " %s.%s%d using %s.%s tags (%d, %d, %d, '%s', '%s') values "%(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,tagBinaryValue,tagBinaryValue)
for j in range(rowsPerTbl):
sql += "(%d, %d, %d, %d, 'binary_%d', 'nchar_%d', now) "%(startTs+j, j,j, j,i+ctbStartIdx,rowsBatched)
@@ -413,7 +413,7 @@ class TMQCom:
for i in range(ctbNum):
tbName = '%s%s'%(ctbPrefix,i)
tdCom.insert_rows(tsql,dbname=paraDict["dbName"],tbname=tbName,start_ts_value=paraDict['startTs'],count=paraDict['rowsPerTbl'])
- return
+ return
def threadFunction(self, **paraDict):
# create new connector for new tdSql instance in my thread
@@ -447,20 +447,20 @@ class TMQCom:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
-
+
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
-
+
# skip offset for consumer
for i in range(0,skipRowsOfCons):
- consumeFile.readline()
-
+ consumeFile.readline()
+
lines = 0
while True:
dst = queryFile.readline()
@@ -473,7 +473,7 @@ class TMQCom:
tdLog.exit("consumerId %d consume rows[%d] is not match the rows by direct query"%(consumerId, lines))
else:
break
- return
+ return
def getResultFileByTaosShell(self, consumerId, queryString):
buildPath = tdCom.getBuildPath()
@@ -483,15 +483,15 @@ class TMQCom:
tdLog.info(cmdStr)
os.system(cmdStr)
return dstFile
-
- def checkTmqConsumeFileContent(self, consumerId, dstFile):
- cfgPath = tdCom.getClientCfgPath()
+
+ def checkTmqConsumeFileContent(self, consumerId, dstFile):
+ cfgPath = tdCom.getClientCfgPath()
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
lines = 0
@@ -506,7 +506,7 @@ class TMQCom:
tdLog.exit("consumerId %d consume rows[%d] is not match the rows by direct query"%(consumerId, lines))
else:
break
- return
+ return
def create_ntable(self, tsql, dbname=None, tbname_prefix="ntb", tbname_index_start_num = 1, column_elm_list=None, colPrefix='c', tblNum=1, **kwargs):
tb_params = ""
@@ -538,7 +538,7 @@ class TMQCom:
column_value_str = column_value_str.rstrip()[:-1]
insert_sql = f'insert into {dbname}.{tbname_prefix}{tblIdx+tbname_index_start_num} values ({column_value_str});'
tsql.execute(insert_sql)
-
+
def waitSubscriptionExit(self, tsql, topicName):
wait_cnt = 0
while True:
@@ -548,7 +548,7 @@ class TMQCom:
for idx in range (rows):
if tsql.getData(idx, 0) != topicName:
continue
-
+
if tsql.getData(idx, 3) == None:
continue
else:
@@ -556,10 +556,10 @@ class TMQCom:
wait_cnt += 1
exit_flag = 0
break
-
+
if exit_flag == 1:
break
-
+
tsql.query("show subscriptions")
tdLog.info("show subscriptions:")
tdLog.info(tsql.queryResult)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
index d22d183e86..20b0c65c71 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 100000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
@@ -94,18 +94,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -120,18 +120,18 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- tmqCom.checkFileContent(consumerId, queryString)
+ tmqCom.checkFileContent(consumerId, queryString)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -162,11 +162,11 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
@@ -174,7 +174,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -190,60 +190,60 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
firstConsumeRows = resultList[0]
-
+
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
- tdLog.exit("%d tmq consume rows error!"%consumerId)
+ tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(totalRowsInserted/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
- resultList = tmqCom.selectConsumeResult(expectRows)
+ resultList = tmqCom.selectConsumeResult(expectRows)
secondConsumeRows = resultList[0]
-
+
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
+
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 3
expectrowcnt = math.ceil(totalRowsInserted/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
- resultList = tmqCom.selectConsumeResult(expectRows)
+ resultList = tmqCom.selectConsumeResult(expectRows)
thirdConsumeRows = resultList[0]
-
+
if not (totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
- # total consume
+
+ # total consume
actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows
-
+
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py
index 951a747069..494952ecd5 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 100000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
@@ -94,18 +94,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -120,18 +120,18 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- tmqCom.checkFileContent(consumerId, queryString)
+ tmqCom.checkFileContent(consumerId, queryString)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -162,18 +162,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -189,60 +189,60 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
firstConsumeRows = resultList[0]
-
+
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
- tdLog.exit("%d tmq consume rows error!"%consumerId)
+ tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
- resultList = tmqCom.selectConsumeResult(expectRows)
+ resultList = tmqCom.selectConsumeResult(expectRows)
secondConsumeRows = resultList[0]
-
+
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
+
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 3
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
- resultList = tmqCom.selectConsumeResult(expectRows)
+ resultList = tmqCom.selectConsumeResult(expectRows)
thirdConsumeRows = resultList[0]
-
+
if not (totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
- # total consume
+
+ # total consume
actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows
-
+
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
index 6ee089af4e..666ca6ca64 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 150
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.starttaosd(1)
@@ -94,11 +94,11 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
@@ -106,7 +106,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -121,18 +121,18 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- # tmqCom.checkFileContent(consumerId, queryString)
+ # tmqCom.checkFileContent(consumerId, queryString)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -163,11 +163,11 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
@@ -175,7 +175,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
tdLog.info("select result rows: %d"%totalRowsInserted)
@@ -192,15 +192,15 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
+
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
@@ -208,22 +208,22 @@ class TDTestCase:
consumerId = 2
expectrowcnt = math.ceil(totalRowsInserted*2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeTotalRows = firstConsumeRows + resultList[0]
-
+
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
- tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
+ tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
index 882989bfb6..e4ce3b0f77 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 70
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# # tdDnodes.start(1)
@@ -95,18 +95,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -121,18 +121,18 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- # tmqCom.checkFileContent(consumerId, queryString)
+ # tmqCom.checkFileContent(consumerId, queryString)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -163,18 +163,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -190,15 +190,15 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
+
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
@@ -206,22 +206,22 @@ class TDTestCase:
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeTotalRows = firstConsumeRows + resultList[0]
-
+
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
- tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
+ tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py
index c334ff752b..da7d9e4651 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 10
self.rowsPerTbl = 10000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# # tdDnodes.start(1)
@@ -95,18 +95,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -121,18 +121,18 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- # tmqCom.checkFileContent(consumerId, queryString)
+ # tmqCom.checkFileContent(consumerId, queryString)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -163,18 +163,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -190,15 +190,15 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
+
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
@@ -206,22 +206,22 @@ class TDTestCase:
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeTotalRows = firstConsumeRows + resultList[0]
-
+
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
- tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
+ tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb.py b/tests/system-test/7-tmq/tmqConsFromTsdb.py
index a4a242365a..b3bb5f84e4 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 1
self.ctbNum = 10
self.rowsPerTbl = 10000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
@@ -94,18 +94,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -120,18 +120,18 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- tmqCom.checkFileContent(consumerId, queryString)
+ tmqCom.checkFileContent(consumerId, queryString)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -162,18 +162,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -189,15 +189,15 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
-
+
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
@@ -205,22 +205,22 @@ class TDTestCase:
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeTotalRows = firstConsumeRows + resultList[0]
-
+
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
- tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
+ tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
index ec11b3286c..07fb9c7751 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 1000000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
@@ -93,11 +93,11 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
@@ -105,7 +105,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -125,17 +125,17 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
-
+
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -166,11 +166,11 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
@@ -178,7 +178,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -194,35 +194,35 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
-
+
# time.sleep(10)
-
+
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
-
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeTotalRows = resultList[0]
-
+
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
- tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
+ tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py
index 4878652593..ecdb0a4358 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 100000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
@@ -93,18 +93,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -124,17 +124,17 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
-
+
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -165,18 +165,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -192,35 +192,35 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
-
+
# time.sleep(10)
-
+
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
-
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeTotalRows = resultList[0]
-
+
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
- tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
+ tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
index 451dc43343..05f7030169 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 4000
self.rowsPerTbl = 150
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
@@ -93,11 +93,11 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
@@ -105,7 +105,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
tdLog.info("select result rows: %d"%totalRowsInserted)
@@ -126,12 +126,12 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
-
+
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
@@ -167,11 +167,11 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 != 0) and (ts+1a >= %d) and (t4 like '%%shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/10))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
@@ -179,7 +179,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
tdLog.info("select result rows: %d"%totalRowsInserted)
@@ -196,7 +196,7 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
# tdLog.info("wait start consume notify")
@@ -204,29 +204,29 @@ class TDTestCase:
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
-
+
# time.sleep(10)
-
+
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
-
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeTotalRows = resultList[0]
-
+
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
- tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
+ tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
index 3b3467f9f9..232d90848f 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 70
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
@@ -93,18 +93,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -124,17 +124,17 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
-
+
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -165,18 +165,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -192,35 +192,35 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
-
+
# time.sleep(10)
-
+
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
-
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeTotalRows = resultList[0]
-
+
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
- tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
+ tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py
index d1fe69f0b7..5841c6d605 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 10
self.rowsPerTbl = 10000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
@@ -93,18 +93,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -124,17 +124,17 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
-
+
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -165,18 +165,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -192,35 +192,35 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
-
+
# time.sleep(10)
-
+
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
-
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeTotalRows = resultList[0]
-
+
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
- tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
+ tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1.py b/tests/system-test/7-tmq/tmqConsFromTsdb1.py
index 597f7968ae..499f837ccc 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 1
self.ctbNum = 10
self.rowsPerTbl = 10000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -62,7 +62,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
@@ -93,18 +93,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -124,17 +124,17 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
-
+ tdLog.info("wait the consume result")
+
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
-
+
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -165,18 +165,18 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
-
+
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
@@ -192,35 +192,35 @@ class TDTestCase:
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
-
+
# time.sleep(10)
-
+
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
-
+
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
actConsumeTotalRows = resultList[0]
-
+
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
- tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
+ tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqConsumerGroup.py b/tests/system-test/7-tmq/tmqConsumerGroup.py
index bfd63fd4a2..b5cbfb8a51 100644
--- a/tests/system-test/7-tmq/tmqConsumerGroup.py
+++ b/tests/system-test/7-tmq/tmqConsumerGroup.py
@@ -27,26 +27,26 @@ class TDTestCase:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
-
+
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
-
+
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
- return
+ return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
@@ -78,7 +78,7 @@ class TDTestCase:
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("insert data")
tmqCom.insert_data_2(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
+
tdLog.info("create topics from stb with filter")
# queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s" %(paraDict['dbName'], paraDict['stbName'])
@@ -114,7 +114,7 @@ class TDTestCase:
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:3000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-
+
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
@@ -132,7 +132,7 @@ class TDTestCase:
pThread.join()
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actTotalRows = 0
@@ -140,7 +140,7 @@ class TDTestCase:
actTotalRows += resultList[i]
tdSql.query(queryString)
- expectRowsList.append(tdSql.getRows())
+ expectRowsList.append(tdSql.getRows())
expectTotalRows = 0
for i in range(len(expectRowsList)):
expectTotalRows += expectRowsList[i]
@@ -150,7 +150,7 @@ class TDTestCase:
tdLog.info("act consume rows: %d should >= expect consume rows: %d"%(actTotalRows, expectTotalRows))
tdLog.exit("0 tmq consume rows error!")
- # time.sleep(10)
+ # time.sleep(10)
# for i in range(len(topicNameList)):
# tdSql.query("drop topic %s"%topicNameList[i])
diff --git a/tests/system-test/7-tmq/tmqDelete-1ctb.py b/tests/system-test/7-tmq/tmqDelete-1ctb.py
index bedb36e505..8329b00145 100644
--- a/tests/system-test/7-tmq/tmqDelete-1ctb.py
+++ b/tests/system-test/7-tmq/tmqDelete-1ctb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1
self.rowsPerTbl = 10000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1,wal_retention_size=-1, wal_retention_period=-1)
tdLog.info("create stb")
@@ -65,11 +65,11 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
-
+
def delData(self,tsql,dbName,ctbPrefix,ctbNum,startTs=0,endTs=0,ctbStartIdx=0):
tdLog.debug("start to del data ............")
for i in range(ctbNum):
@@ -84,12 +84,12 @@ class TDTestCase:
newTdSql = tdCom.newTdSql()
self.delData(newTdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["startTs"],paraDict["endTs"],paraDict["ctbStartIdx"])
return
-
+
def asyncDeleteData(self, paraDict):
pThread = threading.Thread(target=self.threadFunctionForDeletaData, kwargs=paraDict)
pThread.start()
return pThread
-
+
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
@@ -116,29 +116,29 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- # del some data
+
+ # del some data
rowsOfDelete = int(paraDict["rowsPerTbl"] / 4)
paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1
- self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],
+ self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],
startTs=paraDict["startTs"], endTs=paraDict["endTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
+ tdSql.execute(sqlString)
+
if self.snapshot == 0:
consumerId = 0
elif self.snapshot == 1:
consumerId = 1
rowsOfDelete = 0
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
topicList = topicFromStb1
ifcheckdata = 1
@@ -161,23 +161,23 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d, act query rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsFromQuery))
-
+
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error with snapshot = 0!")
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error with snapshot = 1!")
-
- tmqCom.checkFileContent(consumerId=consumerId, queryString=queryString, skipRowsOfCons=rowsOfDelete)
+
+ tmqCom.checkFileContent(consumerId=consumerId, queryString=queryString, skipRowsOfCons=rowsOfDelete)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -197,50 +197,50 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
-
+
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
# update to 1/4 rows and insert 3/4 new rows
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4)
# paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
# del some data
rowsOfDelete = int(self.rowsPerTbl / 4 )
paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1
- self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],
+ self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],
startTs=paraDict["startTs"], endTs=paraDict["endTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
- consumerId = 1
-
+ consumerId = 1
+
if self.snapshot == 0:
- consumerId = 2
+ consumerId = 2
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4))
elif self.snapshot == 1:
consumerId = 3
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -252,7 +252,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -262,24 +262,24 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
-
+
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error with snapshot = 0!")
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error with snapshot = 1!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 2 end ...... ")
def tmqCase3(self):
- tdLog.printNoPrefix("======== test case 3: ")
+ tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -299,34 +299,34 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
-
+
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
- consumerId = 1
-
+ consumerId = 1
+
if self.snapshot == 0:
- consumerId = 4
+ consumerId = 4
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4))
elif self.snapshot == 1:
consumerId = 5
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -340,20 +340,20 @@ class TDTestCase:
rowsOfDelete = int(self.rowsPerTbl / 4 )
paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1
pDeleteThread = self.asyncDeleteData(paraDict)
-
+
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
# update to 1/4 rows and insert 3/4 new rows
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4)
# paraDict['rowsPerTbl'] = self.rowsPerTbl
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
pInsertThread.join()
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -363,17 +363,17 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
-
+
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error with snapshot = 0!")
elif self.snapshot == 1:
if not ((totalConsumeRows >= totalRowsFromQuery) and (totalConsumeRows <= expectrowcnt)):
tdLog.exit("tmq consume rows error with snapshot = 1!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
@@ -385,17 +385,17 @@ class TDTestCase:
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.snapshot = 0
- self.prepareTestEnv()
+ self.prepareTestEnv()
self.tmqCase1()
- self.tmqCase2()
-
+ self.tmqCase2()
+
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.prepareTestEnv()
self.tmqCase1()
- self.tmqCase2()
-
+ self.tmqCase2()
+
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.snapshot = 0
@@ -404,7 +404,7 @@ class TDTestCase:
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
- self.prepareTestEnv()
+ self.prepareTestEnv()
self.tmqCase3()
def stop(self):
diff --git a/tests/system-test/7-tmq/tmqDelete-multiCtb.py b/tests/system-test/7-tmq/tmqDelete-multiCtb.py
index 94ca16bc6f..e59040305a 100644
--- a/tests/system-test/7-tmq/tmqDelete-multiCtb.py
+++ b/tests/system-test/7-tmq/tmqDelete-multiCtb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1,wal_retention_size=-1, wal_retention_period=-1)
tdLog.info("create stb")
@@ -65,11 +65,11 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
-
+
def delData(self,tsql,dbName,ctbPrefix,ctbNum,startTs=0,endTs=0,ctbStartIdx=0):
tdLog.debug("start to del data ............")
for i in range(ctbNum):
@@ -84,12 +84,12 @@ class TDTestCase:
newTdSql = tdCom.newTdSql()
self.delData(newTdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["startTs"],paraDict["endTs"],paraDict["ctbStartIdx"])
return
-
+
def asyncDeleteData(self, paraDict):
pThread = threading.Thread(target=self.threadFunctionForDeletaData, kwargs=paraDict)
pThread.start()
return pThread
-
+
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
@@ -116,29 +116,29 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- # del some data
+
+ # del some data
rowsOfDelete = int(paraDict["rowsPerTbl"] / 4)
paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1
- self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],
+ self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],
startTs=paraDict["startTs"], endTs=paraDict["endTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
+ tdSql.execute(sqlString)
+
if self.snapshot == 0:
consumerId = 0
elif self.snapshot == 1:
consumerId = 1
rowsOfDelete = 0
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
topicList = topicFromStb1
ifcheckdata = 1
@@ -161,23 +161,23 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d, act query rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsFromQuery))
-
+
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error with snapshot = 0!")
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error with snapshot = 1!")
-
- # tmqCom.checkFileContent(consumerId=consumerId, queryString=queryString, skipRowsOfCons=rowsOfDelete)
+
+ # tmqCom.checkFileContent(consumerId=consumerId, queryString=queryString, skipRowsOfCons=rowsOfDelete)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -197,50 +197,50 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
-
+
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
# update to 1/4 rows and insert 3/4 new rows
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4)
# paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
# del some data
rowsOfDelete = int(self.rowsPerTbl / 4 )
paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1
- self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],
+ self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],
startTs=paraDict["startTs"], endTs=paraDict["endTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
- consumerId = 1
-
+ consumerId = 1
+
if self.snapshot == 0:
- consumerId = 2
+ consumerId = 2
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4))
elif self.snapshot == 1:
consumerId = 3
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -252,7 +252,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -262,24 +262,24 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
-
+
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error with snapshot = 0!")
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error with snapshot = 1!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 2 end ...... ")
def tmqCase3(self):
- tdLog.printNoPrefix("======== test case 3: ")
+ tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -299,34 +299,34 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
-
+
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
- consumerId = 1
-
+ consumerId = 1
+
if self.snapshot == 0:
- consumerId = 4
+ consumerId = 4
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4))
elif self.snapshot == 1:
consumerId = 5
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -340,20 +340,20 @@ class TDTestCase:
rowsOfDelete = int(self.rowsPerTbl / 4 )
paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1
pDeleteThread = self.asyncDeleteData(paraDict)
-
+
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
# update to 1/4 rows and insert 3/4 new rows
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4)
# paraDict['rowsPerTbl'] = self.rowsPerTbl
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
pInsertThread.join()
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -363,47 +363,47 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
-
+
if self.snapshot == 0:
if totalConsumeRows < expectrowcnt:
tdLog.exit("tmq consume rows error with snapshot = 0!")
elif self.snapshot == 1:
if not ((totalConsumeRows >= totalRowsFromQuery) and (totalConsumeRows <= expectrowcnt)):
tdLog.exit("tmq consume rows error with snapshot = 1!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 3 end ...... ")
- def run(self):
+ def run(self):
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.snapshot = 0
self.prepareTestEnv()
self.tmqCase1()
- self.tmqCase2()
-
+ self.tmqCase2()
+
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
-
+
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.snapshot = 0
- self.prepareTestEnv()
- self.tmqCase3()
+ self.prepareTestEnv()
+ self.tmqCase3()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
- self.prepareTestEnv()
+ self.prepareTestEnv()
self.tmqCase3()
def stop(self):
diff --git a/tests/system-test/7-tmq/tmqDnode.py b/tests/system-test/7-tmq/tmqDnode.py
index 235e9ef971..802993e924 100644
--- a/tests/system-test/7-tmq/tmqDnode.py
+++ b/tests/system-test/7-tmq/tmqDnode.py
@@ -56,7 +56,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -65,12 +65,12 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -85,11 +85,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -98,9 +98,9 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -130,7 +130,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -149,7 +149,7 @@ class TDTestCase:
ctbDict[i] = 0
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfCtb = 0
+ rowsOfCtb = 0
while rowsOfCtb < rowsPerTbl:
for i in range(ctbNum):
sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
@@ -176,7 +176,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(ctbPrefix,i)
for j in range(rowsPerTbl):
@@ -207,7 +207,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
for j in range(rowsPerTbl):
@@ -226,8 +226,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -246,8 +246,8 @@ class TDTestCase:
return
def tmqCase1(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 1: ")
-
+ tdLog.printNoPrefix("======== test case 1: ")
+
self.initConsumerTable()
# create and start thread
@@ -264,7 +264,7 @@ class TDTestCase:
'batchNum': 23, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"])
@@ -272,7 +272,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -303,7 +303,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -313,8 +313,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 2: ")
-
+ tdLog.printNoPrefix("======== test case 2: ")
+
self.initConsumerTable()
# create and start thread
@@ -339,7 +339,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] * 2
@@ -373,7 +373,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -387,8 +387,8 @@ class TDTestCase:
# 自动建表完成数据插入,启动消费
def tmqCase3(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 3: ")
-
+ tdLog.printNoPrefix("======== test case 3: ")
+
self.initConsumerTable()
# create and start thread
@@ -414,7 +414,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -444,7 +444,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -466,7 +466,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
# self.tmqCase1(cfgPath, buildPath)
- # self.tmqCase2(cfgPath, buildPath)
+ # self.tmqCase2(cfgPath, buildPath)
self.tmqCase3(cfgPath, buildPath)
# self.tmqCase4(cfgPath, buildPath)
# self.tmqCase5(cfgPath, buildPath)
diff --git a/tests/system-test/7-tmq/tmqDnodeRestart.py b/tests/system-test/7-tmq/tmqDnodeRestart.py
index 9a11106e3e..1902945bf6 100644
--- a/tests/system-test/7-tmq/tmqDnodeRestart.py
+++ b/tests/system-test/7-tmq/tmqDnodeRestart.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 2
self.ctbNum = 100
self.rowsPerTbl = 1000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -51,7 +51,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1,wal_retention_size=-1, wal_retention_period=-1)
tdLog.info("create stb")
@@ -63,7 +63,7 @@ class TDTestCase:
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
@@ -96,7 +96,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
# tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
# tdLog.info("create stb")
@@ -110,12 +110,12 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicFromStb1
@@ -129,7 +129,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
# time.sleep(3)
tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("================= restart dnode ===========================")
@@ -146,7 +146,7 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQury = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQury))
if not (totalConsumeRows == totalRowsFromQury):
tdLog.exit("tmq consume rows error!")
@@ -157,7 +157,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -182,7 +182,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
# tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
# tdLog.info("create stb")
@@ -195,12 +195,12 @@ class TDTestCase:
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
consumerId = 1
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicFromStb1
@@ -225,7 +225,7 @@ class TDTestCase:
paraDict["batchNum"] = 100
paraDict["ctbPrefix"] = 'newCtb'
tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -235,7 +235,7 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery))
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
@@ -249,7 +249,7 @@ class TDTestCase:
# tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
- self.tmqCase2()
+ self.tmqCase2()
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py
index 650d918828..b952dc2d57 100644
--- a/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py
+++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py
@@ -20,11 +20,11 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1000
self.rowsPerTbl = 10
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
-
+
# drop some ntbs
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
@@ -51,8 +51,8 @@ class TDTestCase:
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
tmqCom.initConsumerTable()
tdLog.info("start create database....")
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
@@ -60,11 +60,11 @@ class TDTestCase:
tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"])
tdLog.info("start insert data into normal tables....")
tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"])
-
+
tdLog.info("create topics from database")
- topicFromDb = 'topic_dbt'
+ topicFromDb = 'topic_dbt'
tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName']))
-
+
if self.snapshot == 0:
consumerId = 0
elif self.snapshot == 1:
@@ -83,13 +83,13 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tmqCom.getStartConsumeNotifyFromTmqsim()
+ tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("drop some ntables")
# drop 1/4 ctbls from half offset
paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 1 / 2)
paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4)
tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"])
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -98,19 +98,19 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
-
+
if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)):
tdLog.exit("tmq consume rows error with snapshot = 0!")
-
- tdLog.info("wait subscriptions exit ....")
+
+ tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
-
+
tdSql.query("drop topic %s"%topicFromDb)
tdLog.info("success dorp topic: %s"%topicFromDb)
tdLog.printNoPrefix("======== test case 1 end ...... ")
-
-
+
+
# drop some ntbs and create some new ntbs
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
@@ -137,8 +137,8 @@ class TDTestCase:
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
tmqCom.initConsumerTable()
tdLog.info("start create database....")
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
@@ -146,11 +146,11 @@ class TDTestCase:
tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"])
tdLog.info("start insert data into normal tables....")
tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"])
-
+
tdLog.info("create topics from database")
- topicFromDb = 'topic_dbt'
+ topicFromDb = 'topic_dbt'
tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName']))
-
+
if self.snapshot == 0:
consumerId = 2
elif self.snapshot == 1:
@@ -169,20 +169,20 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tmqCom.getStartConsumeNotifyFromTmqsim()
+ tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("drop some ntables")
# drop 1/4 ctbls from half offset
paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 1 / 2)
paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4)
tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"])
-
+
tdLog.info("start create some new normal tables....")
paraDict["ctbPrefix"] = 'newCtb'
paraDict["ctbNum"] = self.ctbNum
tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"])
tdLog.info("start insert data into these new normal tables....")
tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"])
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -191,24 +191,24 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
-
+
if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)):
tdLog.exit("tmq consume rows error with snapshot = 0!")
-
- tdLog.info("wait subscriptions exit ....")
+
+ tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
-
+
tdSql.query("drop topic %s"%topicFromDb)
tdLog.info("success dorp topic: %s"%topicFromDb)
tdLog.printNoPrefix("======== test case 2 end ...... ")
-
- def run(self):
+
+ def run(self):
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.snapshot = 0
self.tmqCase1()
- self.tmqCase2()
-
+ self.tmqCase2()
+
# tdLog.printNoPrefix("====================================================================")
# tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
# self.snapshot = 1
diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
index b23f422585..20e363341f 100644
--- a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
+++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
@@ -20,11 +20,11 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1000
self.rowsPerTbl = 10
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
-
+
# drop some ntbs
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
@@ -51,8 +51,8 @@ class TDTestCase:
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
tmqCom.initConsumerTable()
tdLog.info("start create database....")
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
@@ -60,11 +60,11 @@ class TDTestCase:
tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"])
tdLog.info("start insert data into normal tables....")
tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"])
-
+
tdLog.info("create topics from database")
- topicFromDb = 'topic_dbt'
+ topicFromDb = 'topic_dbt'
tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName']))
-
+
if self.snapshot == 0:
consumerId = 0
elif self.snapshot == 1:
@@ -83,13 +83,13 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tmqCom.getStartConsumeNotifyFromTmqsim()
+ tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("drop some ntables")
# drop 1/4 ctbls from half offset
paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 1 / 2)
paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4)
tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"])
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -98,19 +98,19 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
-
+
if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)):
tdLog.exit("tmq consume rows error with snapshot = 0!")
-
- tdLog.info("wait subscriptions exit ....")
+
+ tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
-
+
tdSql.query("drop topic %s"%topicFromDb)
tdLog.info("success dorp topic: %s"%topicFromDb)
tdLog.printNoPrefix("======== test case 1 end ...... ")
-
-
+
+
# drop some ntbs and create some new ntbs
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
@@ -137,8 +137,8 @@ class TDTestCase:
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
tmqCom.initConsumerTable()
tdLog.info("start create database....")
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
@@ -146,11 +146,11 @@ class TDTestCase:
tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"])
tdLog.info("start insert data into normal tables....")
tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"])
-
+
tdLog.info("create topics from database")
- topicFromDb = 'topic_dbt'
+ topicFromDb = 'topic_dbt'
tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName']))
-
+
if self.snapshot == 0:
consumerId = 2
elif self.snapshot == 1:
@@ -169,20 +169,20 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tmqCom.getStartConsumeNotifyFromTmqsim()
+ tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("drop some ntables")
# drop 1/4 ctbls from half offset
paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 1 / 2)
paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4)
tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"])
-
+
tdLog.info("start create some new normal tables....")
paraDict["ctbPrefix"] = 'newCtb'
paraDict["ctbNum"] = self.ctbNum
tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"])
tdLog.info("start insert data into these new normal tables....")
tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"])
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -191,24 +191,24 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
-
+
if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)):
tdLog.exit("tmq consume rows error with snapshot = 0!")
-
- tdLog.info("wait subscriptions exit ....")
+
+ tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
-
+
tdSql.query("drop topic %s"%topicFromDb)
tdLog.info("success dorp topic: %s"%topicFromDb)
tdLog.printNoPrefix("======== test case 2 end ...... ")
-
- def run(self):
+
+ def run(self):
# tdLog.printNoPrefix("=============================================")
# tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
# self.snapshot = 0
# self.tmqCase1()
- # self.tmqCase2()
-
+ # self.tmqCase2()
+
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
diff --git a/tests/system-test/7-tmq/tmqDropStb.py b/tests/system-test/7-tmq/tmqDropStb.py
index 2889bdc6a6..b172224c2a 100644
--- a/tests/system-test/7-tmq/tmqDropStb.py
+++ b/tests/system-test/7-tmq/tmqDropStb.py
@@ -47,7 +47,7 @@ class TDTestCase:
pollDelay = 20
showMsg = 1
- showRow = 1
+ showRow = 1
hostname = socket.gethostname()
@@ -59,7 +59,7 @@ class TDTestCase:
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
tdLog.info("step 1: create database, stb, ctb and insert data")
-
+
tmqCom.initConsumerTable(self.cdbName)
tdCom.create_database(tdSql,self.paraDict["dbName"],self.paraDict["dropFlag"])
@@ -69,35 +69,35 @@ class TDTestCase:
tdCom.create_ctable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],tag_elm_list=self.paraDict['tagSchema'],count=self.paraDict["ctbNum"],default_ctbname_prefix=self.paraDict["ctbPrefix"])
tmqCom.insert_data_2(tdSql,self.paraDict["dbName"],self.paraDict["ctbPrefix"],self.paraDict["ctbNum"],self.paraDict["rowsPerTbl"],self.paraDict["batchNum"],self.paraDict["startTs"],self.paraDict["ctbStartIdx"])
# pThread1 = tmqCom.asyncInsertData(paraDict=self.paraDict)
-
+
self.paraDict["stbName"] = 'stb2'
self.paraDict["ctbPrefix"] = 'newctb'
self.paraDict["batchNum"] = 10000
tdCom.create_stable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],column_elm_list=self.paraDict["colSchema"],tag_elm_list=self.paraDict["tagSchema"],count=1, default_stbname_prefix=self.paraDict["stbName"])
tdCom.create_ctable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],tag_elm_list=self.paraDict['tagSchema'],count=self.paraDict["ctbNum"],default_ctbname_prefix=self.paraDict["ctbPrefix"])
# tmqCom.insert_data_2(tdSql,self.paraDict["dbName"],self.paraDict["ctbPrefix"],self.paraDict["ctbNum"],self.paraDict["rowsPerTbl"],self.paraDict["batchNum"],self.paraDict["startTs"],self.paraDict["ctbStartIdx"])
- pThread2 = tmqCom.asyncInsertData(paraDict=self.paraDict)
+ pThread2 = tmqCom.asyncInsertData(paraDict=self.paraDict)
tdLog.info("create topics from db")
- topicName1 = 'UpperCasetopic_%s'%(self.paraDict['dbName'])
+ topicName1 = 'UpperCasetopic_%s'%(self.paraDict['dbName'])
tdSql.execute("create topic %s as database %s" %(topicName1, self.paraDict['dbName']))
-
+
topicList = topicName1 + ',' +topicName1
keyList = '%s,%s,%s,%s'%(self.groupId,self.autoCommit,self.autoCommitInterval,self.autoOffset)
self.expectrowcnt = self.paraDict["rowsPerTbl"] * self.paraDict["ctbNum"] * 2
tmqCom.insertConsumerInfo(self.consumerId, self.expectrowcnt,topicList,keyList,self.ifcheckdata,self.ifManualCommit)
-
- tdLog.info("start consume processor")
+
+ tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(self.pollDelay,self.paraDict["dbName"],self.showMsg, self.showRow,self.cdbName)
-
- tmqCom.getStartConsumeNotifyFromTmqsim()
+
+ tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("drop one stable")
- self.paraDict["stbName"] = 'stb1'
- tdSql.execute("drop table %s.%s" %(self.paraDict['dbName'], self.paraDict['stbName']))
+ self.paraDict["stbName"] = 'stb1'
+ tdSql.execute("drop table %s.%s" %(self.paraDict['dbName'], self.paraDict['stbName']))
# tmqCom.drop_ctable(tdSql, dbname=self.paraDict['dbName'], count=self.paraDict["ctbNum"], default_ctbname_prefix=self.paraDict["ctbPrefix"])
pThread2.join()
-
+
tdLog.info("wait result from consumer, then check it")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -105,7 +105,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if not (totalConsumeRows >= self.expectrowcnt/2 and totalConsumeRows <= self.expectrowcnt):
tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, self.expectrowcnt/2, self.expectrowcnt))
tdLog.exit("tmq consume rows error!")
diff --git a/tests/system-test/7-tmq/tmqDropStbCtb.py b/tests/system-test/7-tmq/tmqDropStbCtb.py
index f86d1295f4..992a128ac0 100644
--- a/tests/system-test/7-tmq/tmqDropStbCtb.py
+++ b/tests/system-test/7-tmq/tmqDropStbCtb.py
@@ -20,11 +20,11 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
-
+
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
# tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -65,11 +65,11 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
-
+
# drop some ctbs
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
@@ -96,10 +96,10 @@ class TDTestCase:
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
tmqCom.initConsumerTable()
-
+
# again create one new stb1
paraDict["stbName"] = 'stb1'
paraDict['ctbPrefix'] = 'ctb1n_'
@@ -112,16 +112,16 @@ class TDTestCase:
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
-
+
tdLog.info("create topics from database")
- topicFromDb = 'topic_dbt'
+ topicFromDb = 'topic_dbt'
tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName']))
-
+
if self.snapshot == 0:
consumerId = 0
elif self.snapshot == 1:
consumerId = 1
-
+
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2)
topicList = topicFromDb
ifcheckdata = 1
@@ -135,17 +135,17 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tmqCom.getStartConsumeNotifyFromTmqsim()
+ tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("drop some ctables")
- paraDict["stbName"] = 'stb'
+ paraDict["stbName"] = 'stb'
paraDict['ctbPrefix'] = 'ctb'
paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 3 / 4) # drop 1/4 ctbls
paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4)
- # tdSql.execute("drop table %s.%s" %(paraDict['dbName'], paraDict['stbName']))
+ # tdSql.execute("drop table %s.%s" %(paraDict['dbName'], paraDict['stbName']))
tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"])
-
+
pInsertThread.join()
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -154,17 +154,17 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
-
+
if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
tdLog.exit("tmq consume rows error with snapshot = 0!")
- tdLog.info("wait subscriptions exit ....")
+ tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
-
+
tdSql.query("drop topic %s"%topicFromDb)
tdLog.info("success dorp topic: %s"%topicFromDb)
tdLog.printNoPrefix("======== test case 1 end ...... ")
-
+
# drop one stb
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
@@ -191,10 +191,10 @@ class TDTestCase:
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
tmqCom.initConsumerTable()
-
+
# again create one new stb1
paraDict["stbName"] = 'stb2'
paraDict['ctbPrefix'] = 'ctb2n_'
@@ -207,16 +207,16 @@ class TDTestCase:
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
-
+
tdLog.info("create topics from database")
- topicFromDb = 'topic_dbt'
+ topicFromDb = 'topic_dbt'
tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName']))
-
+
if self.snapshot == 0:
consumerId = 2
elif self.snapshot == 1:
consumerId = 3
-
+
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2)
topicList = topicFromDb
ifcheckdata = 1
@@ -230,13 +230,13 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
- tmqCom.getStartConsumeNotifyFromTmqsim()
+ tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("drop one stable")
paraDict["stbName"] = 'stb1'
- tdSql.execute("drop table %s.%s" %(paraDict['dbName'], paraDict['stbName']))
+ tdSql.execute("drop table %s.%s" %(paraDict['dbName'], paraDict['stbName']))
pInsertThread.join()
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -245,25 +245,25 @@ class TDTestCase:
totalConsumeRows += resultList[i]
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
-
+
if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
tdLog.exit("tmq consume rows error with snapshot = 0!")
- tdLog.info("wait subscriptions exit ....")
+ tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
-
+
tdSql.query("drop topic %s"%topicFromDb)
tdLog.info("success dorp topic: %s"%topicFromDb)
tdLog.printNoPrefix("======== test case 2 end ...... ")
- def run(self):
+ def run(self):
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.snapshot = 0
self.prepareTestEnv()
self.tmqCase1()
- self.tmqCase2()
-
+ self.tmqCase2()
+
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
diff --git a/tests/system-test/7-tmq/tmqError.py b/tests/system-test/7-tmq/tmqError.py
index bd8ec565d8..9afcfaf968 100644
--- a/tests/system-test/7-tmq/tmqError.py
+++ b/tests/system-test/7-tmq/tmqError.py
@@ -56,7 +56,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -65,12 +65,12 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -85,11 +85,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -97,14 +97,14 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
if (platform.system().lower() == 'windows'):
shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
- shellCmd += "> nul 2>&1 &"
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -134,7 +134,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -149,7 +149,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(stbName,i)
for j in range(rowsPerTbl):
@@ -168,8 +168,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -189,11 +189,11 @@ class TDTestCase:
def tmqCase1(self, cfgPath, buildPath):
'''
- Leave a TMQ process. Stop taosd, delete the data directory, restart taosd,
+ Leave a TMQ process. Stop taosd, delete the data directory, restart taosd,
and restart a consumption process to complete a consumption
'''
tdLog.printNoPrefix("======== test case 1: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -217,7 +217,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
# expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -236,7 +236,7 @@ class TDTestCase:
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
-
+
time.sleep(3)
tdLog.info("================= stop dnode, and remove data file, then start dnode ===========================")
tdDnodes.stop(1)
@@ -248,7 +248,7 @@ class TDTestCase:
tdDnodes.start(1)
time.sleep(2)
- ######### redo to consume
+ ######### redo to consume
self.initConsumerTable()
self.create_database(tdSql, parameterDict["dbName"])
@@ -258,7 +258,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -282,7 +282,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
if not (totalConsumeRows == expectrowcnt):
tdLog.exit("tmq consume rows error!")
@@ -293,7 +293,7 @@ class TDTestCase:
else:
os.system('pkill tmq_sim')
- tdLog.printNoPrefix("======== test case 1 end ...... ")
+ tdLog.printNoPrefix("======== test case 1 end ...... ")
def run(self):
tdSql.prepare()
diff --git a/tests/system-test/7-tmq/tmqModule.py b/tests/system-test/7-tmq/tmqModule.py
index 086fde8f05..1ff47da159 100644
--- a/tests/system-test/7-tmq/tmqModule.py
+++ b/tests/system-test/7-tmq/tmqModule.py
@@ -56,7 +56,7 @@ class TDTestCase:
print(cur)
return cur
- def initConsumerTable(self,cdbName='cdb'):
+ def initConsumerTable(self,cdbName='cdb'):
tdLog.info("create consume database, and consume info table, and consume result table")
tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
@@ -65,12 +65,12 @@ class TDTestCase:
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
- def initConsumerInfoTable(self,cdbName='cdb'):
+ def initConsumerInfoTable(self,cdbName='cdb'):
tdLog.info("drop consumeinfo table")
tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
- def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
sql = "insert into %s.consumeinfo values "%cdbName
sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
tdLog.info("consume info sql: %s"%sql)
@@ -85,11 +85,11 @@ class TDTestCase:
break
else:
time.sleep(5)
-
+
for i in range(expectRows):
tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
resultList.append(tdSql.getData(i , 3))
-
+
return resultList
def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -98,9 +98,9 @@ class TDTestCase:
logFile = cfgPath + '/../log/valgrind-tmq.log'
shellCmd = 'nohup valgrind --log-file=' + logFile
shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-
+
shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath
- shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
@@ -130,7 +130,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -149,7 +149,7 @@ class TDTestCase:
ctbDict[i] = 0
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfCtb = 0
+ rowsOfCtb = 0
while rowsOfCtb < rowsPerTbl:
for i in range(ctbNum):
sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
@@ -176,7 +176,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s_%d values "%(ctbPrefix,i)
for j in range(rowsPerTbl):
@@ -207,7 +207,7 @@ class TDTestCase:
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
- rowsOfSql = 0
+ rowsOfSql = 0
for i in range(ctbNum):
sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
for j in range(rowsPerTbl):
@@ -226,8 +226,8 @@ class TDTestCase:
tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
-
- def prepareEnv(self, **parameterDict):
+
+ def prepareEnv(self, **parameterDict):
# create new connector for my thread
tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -246,8 +246,8 @@ class TDTestCase:
return
def tmqCase1(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 1: ")
-
+ tdLog.printNoPrefix("======== test case 1: ")
+
self.initConsumerTable()
# create and start thread
@@ -263,7 +263,7 @@ class TDTestCase:
'batchNum': 33, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -271,7 +271,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -296,7 +296,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -306,8 +306,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 2: ")
-
+ tdLog.printNoPrefix("======== test case 2: ")
+
self.initConsumerTable()
# create and start thread
@@ -343,7 +343,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -392,7 +392,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -402,8 +402,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 2 end ...... ")
def tmqCase3(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 3: ")
-
+ tdLog.printNoPrefix("======== test case 3: ")
+
self.initConsumerTable()
# create and start thread
@@ -427,7 +427,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -445,7 +445,7 @@ class TDTestCase:
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
-
+
time.sleep(5)
tdLog.info("drop som child table of stb1")
dropTblNum = 4
@@ -460,7 +460,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum)
if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt):
@@ -473,7 +473,7 @@ class TDTestCase:
def tmqCase4(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 4: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -489,7 +489,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -502,7 +502,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -527,7 +527,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -545,7 +545,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -556,7 +556,7 @@ class TDTestCase:
def tmqCase5(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 5: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -572,7 +572,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -585,7 +585,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -610,7 +610,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -628,7 +628,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != (expectrowcnt * (1 + 1/4)):
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -639,7 +639,7 @@ class TDTestCase:
def tmqCase6(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 6: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -655,7 +655,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -668,7 +668,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -693,7 +693,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -715,7 +715,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -726,7 +726,7 @@ class TDTestCase:
def tmqCase7(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 7: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -742,7 +742,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -755,7 +755,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -780,7 +780,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -798,7 +798,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -808,8 +808,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 7 end ...... ")
def tmqCase8(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 8: ")
-
+ tdLog.printNoPrefix("======== test case 8: ")
+
self.initConsumerTable()
# create and start thread
@@ -838,7 +838,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -863,7 +863,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -883,7 +883,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -903,7 +903,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt*2:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
tdLog.exit("tmq consume rows error!")
@@ -913,8 +913,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 8 end ...... ")
def tmqCase9(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 9: ")
-
+ tdLog.printNoPrefix("======== test case 9: ")
+
self.initConsumerTable()
# create and start thread
@@ -943,7 +943,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -968,7 +968,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -992,7 +992,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -1012,7 +1012,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt*2:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
tdLog.exit("tmq consume rows error!")
@@ -1022,8 +1022,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 9 end ...... ")
def tmqCase10(self, cfgPath, buildPath):
- tdLog.printNoPrefix("======== test case 10: ")
-
+ tdLog.printNoPrefix("======== test case 10: ")
+
self.initConsumerTable()
# create and start thread
@@ -1052,7 +1052,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -1077,7 +1077,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -1101,7 +1101,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt-10000:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000))
tdLog.exit("tmq consume rows error!")
@@ -1125,7 +1125,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt*2:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
tdLog.exit("tmq consume rows error!")
@@ -1136,7 +1136,7 @@ class TDTestCase:
def tmqCase11(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 11: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -1152,7 +1152,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -1165,7 +1165,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -1190,7 +1190,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -1212,7 +1212,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != 0:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0))
tdLog.exit("tmq consume rows error!")
@@ -1223,7 +1223,7 @@ class TDTestCase:
def tmqCase12(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 12: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -1239,7 +1239,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -1252,7 +1252,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -1277,7 +1277,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -1299,7 +1299,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -1310,7 +1310,7 @@ class TDTestCase:
def tmqCase13(self, cfgPath, buildPath):
tdLog.printNoPrefix("======== test case 13: ")
-
+
self.initConsumerTable()
# create and start thread
@@ -1326,7 +1326,7 @@ class TDTestCase:
'batchNum': 100, \
'startTs': 1640966400000} # 2022-01-01 00:00:00.000
parameterDict['cfg'] = cfgPath
-
+
self.create_database(tdSql, parameterDict["dbName"])
self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
@@ -1339,7 +1339,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
-
+
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName']))
consumerId = 0
expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
@@ -1364,7 +1364,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
@@ -1387,7 +1387,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt*(1/2+1/4):
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4)))
tdLog.exit("tmq consume rows error!")
@@ -1410,7 +1410,7 @@ class TDTestCase:
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
-
+
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
@@ -1431,7 +1431,7 @@ class TDTestCase:
tdLog.info("cfgPath: %s" % cfgPath)
self.tmqCase1(cfgPath, buildPath)
- # self.tmqCase2(cfgPath, buildPath)
+ # self.tmqCase2(cfgPath, buildPath)
# self.tmqCase3(cfgPath, buildPath)
# self.tmqCase4(cfgPath, buildPath)
# self.tmqCase5(cfgPath, buildPath)
diff --git a/tests/system-test/7-tmq/tmqSubscribeStb-r3.py b/tests/system-test/7-tmq/tmqSubscribeStb-r3.py
index 6461ee9644..c5f98bc3a0 100644
--- a/tests/system-test/7-tmq/tmqSubscribeStb-r3.py
+++ b/tests/system-test/7-tmq/tmqSubscribeStb-r3.py
@@ -39,20 +39,20 @@ class TDTestCase:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
-
+
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
-
+
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
@@ -85,7 +85,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replica)
tdLog.info("create stb")
@@ -101,7 +101,7 @@ class TDTestCase:
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.asyncInsertDataByInterlace(paraDict)
-
+
tdLog.info("wait some data inserted")
exitFlag = 1
while exitFlag:
@@ -112,7 +112,7 @@ class TDTestCase:
if (rowsInserted > ((self.ctbNum * self.rowsPerTbl)/5)):
exitFlag = 0
time.sleep(0.1)
-
+
tdLog.info("inserted rows: %d"%tdSql.getData(0,0))
# tdDnodes=cluster.dnodes
tdLog.info("================= restart dnode 2===========================")
@@ -123,18 +123,18 @@ class TDTestCase:
cluster.dnodes[2].starttaosd()
tdLog.info("================= restart dnode 4===========================")
cluster.dnodes[3].stoptaosd()
- cluster.dnodes[3].starttaosd()
+ cluster.dnodes[3].starttaosd()
tdLog.info("================= restart dnode 5===========================")
cluster.dnodes[4].stoptaosd()
cluster.dnodes[4].starttaosd()
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
- tdLog.printNoPrefix("======== test case 1: ")
-
+ tdLog.printNoPrefix("======== test case 1: ")
+
# create and start thread
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
@@ -159,14 +159,14 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicFromStb1
@@ -178,9 +178,9 @@ class TDTestCase:
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
- tdLog.info("start consume processor")
+ tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -190,13 +190,13 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
-
+
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
tdSql.query("drop topic %s"%topicFromStb1)
@@ -204,8 +204,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
-
+ tdLog.printNoPrefix("======== test case 2: ")
+
# create and start thread
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
@@ -230,14 +230,14 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicFromStb1
@@ -249,7 +249,7 @@ class TDTestCase:
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
- tdLog.info("start consume processor")
+ tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("================= restart dnode 2===========================")
@@ -260,11 +260,11 @@ class TDTestCase:
cluster.dnodes[2].starttaosd()
tdLog.info("================= restart dnode 4===========================")
cluster.dnodes[3].stoptaosd()
- cluster.dnodes[3].starttaosd()
+ cluster.dnodes[3].starttaosd()
tdLog.info("================= restart dnode 5===========================")
cluster.dnodes[4].stoptaosd()
cluster.dnodes[4].starttaosd()
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -274,13 +274,13 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
-
+
if totalConsumeRows != totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
tdSql.query("drop topic %s"%topicFromStb1)
diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py
index 84db8e840b..65515c4822 100644
--- a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py
+++ b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py
@@ -22,12 +22,12 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
-
+
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -66,20 +66,20 @@ class TDTestCase:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
-
+
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
-
+
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
@@ -112,7 +112,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -127,11 +127,11 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
-
+
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: multi sub table")
paraDict = {'dbName': 'dbt',
@@ -168,13 +168,13 @@ class TDTestCase:
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -190,10 +190,10 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
@@ -208,7 +208,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 1
@@ -218,7 +218,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@@ -228,12 +228,12 @@ class TDTestCase:
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
-
+
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: multi sub table, consume with auto create tble and insert data")
paraDict = {'dbName': 'dbt',
@@ -270,13 +270,13 @@ class TDTestCase:
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- # tdSql.query(queryString)
+ # tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -292,18 +292,18 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
+ paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
- tdSql.query(queryString)
+
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
@@ -318,7 +318,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 3
@@ -328,7 +328,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@@ -338,7 +338,7 @@ class TDTestCase:
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -348,13 +348,13 @@ class TDTestCase:
# tdSql.prepare()
self.prepare_udf_so()
self.create_udf_function()
-
+
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
-
+
# tdLog.printNoPrefix("====================================================================")
# tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
# self.prepareTestEnv()
diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py
index 5b964e1d38..1c999b86c7 100644
--- a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py
+++ b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py
@@ -22,12 +22,12 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
-
+
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -66,20 +66,20 @@ class TDTestCase:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
-
+
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
-
+
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
@@ -112,7 +112,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -127,11 +127,11 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
-
+
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: multi sub table")
paraDict = {'dbName': 'dbt',
@@ -168,13 +168,13 @@ class TDTestCase:
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -190,10 +190,10 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
@@ -208,7 +208,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 1
@@ -218,7 +218,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@@ -228,12 +228,12 @@ class TDTestCase:
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
-
+
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: multi sub table, consume with auto create tble and insert data")
paraDict = {'dbName': 'dbt',
@@ -270,13 +270,13 @@ class TDTestCase:
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- # tdSql.query(queryString)
+ # tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -292,18 +292,18 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
+ paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
- tdSql.query(queryString)
+
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
@@ -318,7 +318,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 3
@@ -328,7 +328,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@@ -338,7 +338,7 @@ class TDTestCase:
# self.checkFileContent(consumerId, queryString)
# tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -348,13 +348,13 @@ class TDTestCase:
# tdSql.prepare()
self.prepare_udf_so()
self.create_udf_function()
-
+
# tdLog.printNoPrefix("=============================================")
# tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
# self.prepareTestEnv()
# self.tmqCase1()
# self.tmqCase2()
-
+
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.prepareTestEnv()
diff --git a/tests/system-test/7-tmq/tmqUdf.py b/tests/system-test/7-tmq/tmqUdf.py
index 04067ccf65..6e1843404e 100644
--- a/tests/system-test/7-tmq/tmqUdf.py
+++ b/tests/system-test/7-tmq/tmqUdf.py
@@ -22,12 +22,12 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1
self.rowsPerTbl = 1000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
-
+
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -66,20 +66,20 @@ class TDTestCase:
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
-
+
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
-
+
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
-
+
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
@@ -112,7 +112,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -127,11 +127,11 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
-
+
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: one sub table")
paraDict = {'dbName': 'dbt',
@@ -168,13 +168,13 @@ class TDTestCase:
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -190,10 +190,10 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
@@ -209,7 +209,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 1
@@ -219,7 +219,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@@ -229,12 +229,12 @@ class TDTestCase:
self.checkFileContent(consumerId, queryString)
tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
-
+
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: one sub table, consume with auto create tble and insert data")
paraDict = {'dbName': 'dbt',
@@ -271,13 +271,13 @@ class TDTestCase:
# tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# tdLog.info("insert data")
# tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
-
+
tdLog.info("create topics from stb with filter")
queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- # tdSql.query(queryString)
+ # tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
@@ -293,18 +293,18 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
+ paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl)
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
-
- tdSql.query(queryString)
+
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
-
+
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("2 tmq consume rows error!")
@@ -319,7 +319,7 @@ class TDTestCase:
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
- tdSql.query(queryString)
+ tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 3
@@ -329,7 +329,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot'])
- tdLog.info("wait the consume result")
+ tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
@@ -339,7 +339,7 @@ class TDTestCase:
self.checkFileContent(consumerId, queryString)
tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
- time.sleep(10)
+ time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
@@ -349,13 +349,13 @@ class TDTestCase:
# tdSql.prepare()
self.prepare_udf_so()
self.create_udf_function()
-
+
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
-
+
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.prepareTestEnv()
diff --git a/tests/system-test/7-tmq/tmqUpdate-1ctb.py b/tests/system-test/7-tmq/tmqUpdate-1ctb.py
index 5d891bdedf..12de04cd9c 100644
--- a/tests/system-test/7-tmq/tmqUpdate-1ctb.py
+++ b/tests/system-test/7-tmq/tmqUpdate-1ctb.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1
self.rowsPerTbl = 10000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -65,8 +65,8 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
@@ -96,7 +96,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
# update to half tables
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
@@ -104,24 +104,24 @@ class TDTestCase:
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
+ tdSql.execute(sqlString)
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
-
+
if self.snapshot == 0:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -143,18 +143,18 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
-
- tmqCom.checkFileContent(consumerId, queryString)
+
+ tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -174,15 +174,15 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
-
+
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
# update to half tables
paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@@ -191,16 +191,16 @@ class TDTestCase:
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
# tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 1
@@ -208,7 +208,7 @@ class TDTestCase:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -220,7 +220,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -230,13 +230,13 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
-
+
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
@@ -249,14 +249,14 @@ class TDTestCase:
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
-
+
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py
index 3b6ae65316..02641d8bcb 100644
--- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py
+++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py
@@ -21,7 +21,7 @@ class TDTestCase:
self.ctbNum = 50
self.rowsPerTbl = 1000
self.autoCtbPrefix = 'aCtb'
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -51,7 +51,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -66,8 +66,8 @@ class TDTestCase:
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
@@ -96,7 +96,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
# update to half tables
paraDict['ctbNum'] = int(self.ctbNum/2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@@ -105,15 +105,15 @@ class TDTestCase:
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
+ tdSql.execute(sqlString)
+
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
@@ -121,7 +121,7 @@ class TDTestCase:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -143,18 +143,18 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -174,15 +174,15 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
-
+
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
# update to half tables
paraDict['ctbNum'] = int(self.ctbNum/2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@@ -190,23 +190,23 @@ class TDTestCase:
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
-
+
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="aCtby",
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
-
+
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
@@ -215,7 +215,7 @@ class TDTestCase:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2 + 1/2*1/2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -227,7 +227,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -237,13 +237,13 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
-
+
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
@@ -256,14 +256,14 @@ class TDTestCase:
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
-
+
# self.prepareTestEnv()
# tdLog.printNoPrefix("====================================================================")
# tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
# self.snapshot = 1
# self.tmqCase1()
# self.tmqCase2()
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py
index 0ac897c2cd..baeb70e656 100644
--- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py
+++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py
@@ -21,7 +21,7 @@ class TDTestCase:
self.ctbNum = 50
self.rowsPerTbl = 1000
self.autoCtbPrefix = 'aCtb'
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -51,7 +51,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -66,8 +66,8 @@ class TDTestCase:
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
@@ -96,7 +96,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
# update to half tables
paraDict['ctbNum'] = int(self.ctbNum/2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@@ -105,15 +105,15 @@ class TDTestCase:
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
+ tdSql.execute(sqlString)
+
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
@@ -121,7 +121,7 @@ class TDTestCase:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -143,18 +143,18 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -174,15 +174,15 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
-
+
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
# update to half tables
paraDict['ctbNum'] = int(self.ctbNum/2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@@ -190,23 +190,23 @@ class TDTestCase:
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
-
+
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="aCtby",
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
-
+
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
@@ -215,7 +215,7 @@ class TDTestCase:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2 + 1/2*1/2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -227,7 +227,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -237,13 +237,13 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
-
+
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
@@ -256,14 +256,14 @@ class TDTestCase:
# tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
# self.tmqCase1()
# self.tmqCase2()
-
+
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py
index 892137be43..bde266e634 100644
--- a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py
+++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py
@@ -21,7 +21,7 @@ class TDTestCase:
self.ctbNum = 50
self.rowsPerTbl = 1000
self.autoCtbPrefix = 'aCtb'
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -51,7 +51,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
@@ -66,8 +66,8 @@ class TDTestCase:
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdSql.query("flush database %s"%(paraDict['dbName']))
return
@@ -96,7 +96,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
# update to half tables
paraDict['ctbNum'] = int(self.ctbNum/2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@@ -105,15 +105,15 @@ class TDTestCase:
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
+ tdSql.execute(sqlString)
+
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
@@ -121,7 +121,7 @@ class TDTestCase:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -143,18 +143,18 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
- tdLog.printNoPrefix("======== test case 2: ")
+ tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
@@ -174,15 +174,15 @@ class TDTestCase:
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
-
+
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
- tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
-
+
# update to half tables
paraDict['ctbNum'] = int(self.ctbNum/2)
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
@@ -190,23 +190,23 @@ class TDTestCase:
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix,
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
-
+
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="aCtby",
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
-
+
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2))
tmqCom.initConsumerTable()
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
-
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
@@ -215,7 +215,7 @@ class TDTestCase:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2 + 1/2*1/2))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -227,7 +227,7 @@ class TDTestCase:
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-
+
tdLog.info("start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
@@ -237,13 +237,13 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
-
+
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
@@ -256,14 +256,14 @@ class TDTestCase:
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
self.tmqCase2()
-
+
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
self.tmqCase2()
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmqUpdateWithConsume.py b/tests/system-test/7-tmq/tmqUpdateWithConsume.py
index 2dd3a061c6..be07ba13a9 100644
--- a/tests/system-test/7-tmq/tmqUpdateWithConsume.py
+++ b/tests/system-test/7-tmq/tmqUpdateWithConsume.py
@@ -20,7 +20,7 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 100
self.rowsPerTbl = 1000
-
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1, wal_retention_size=-1, wal_retention_period=-1)
tdLog.info("create stb")
@@ -65,8 +65,8 @@ class TDTestCase:
# tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
# ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
# startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
- # tdLog.info("restart taosd to ensure that the data falls into the disk")
+
+ # tdLog.info("restart taosd to ensure that the data falls into the disk")
tdSql.query("flush database %s"%(paraDict['dbName']))
return
@@ -94,30 +94,30 @@ class TDTestCase:
paraDict['snapshot'] = self.snapshot
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
- paraDict['rowsPerTbl'] = self.rowsPerTbl
-
+ paraDict['rowsPerTbl'] = self.rowsPerTbl
+
# update to half tables
paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
- startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+
tdLog.info("create topics from stb1")
- topicFromStb1 = 'topic_stb1'
+ topicFromStb1 = 'topic_stb1'
queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
-
+ tdSql.execute(sqlString)
+
# paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
consumerId = 0
-
+
if self.snapshot == 0:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/2 + 1))
elif self.snapshot == 1:
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2))
-
+
topicList = topicFromStb1
ifcheckdata = 1
ifManualCommit = 1
@@ -143,16 +143,16 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsInserted = tdSql.getRows()
-
+
tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted))
if self.snapshot == 0:
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error!")
elif self.snapshot == 1:
if not (totalConsumeRows < expectrowcnt and totalConsumeRows >= totalRowsInserted):
- tdLog.exit("tmq consume rows error!")
-
- # tmqCom.checkFileContent(consumerId, queryString)
+ tdLog.exit("tmq consume rows error!")
+
+ # tmqCom.checkFileContent(consumerId, queryString)
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
@@ -162,7 +162,7 @@ class TDTestCase:
self.ctbNum = 1
self.snapshot = 0
self.prepareTestEnv()
- self.tmqCase1()
+ self.tmqCase1()
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.prepareTestEnv()
@@ -178,7 +178,7 @@ class TDTestCase:
self.prepareTestEnv()
self.snapshot = 1
self.tmqCase1()
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py
index 2a819f8106..cd13535684 100644
--- a/tests/system-test/7-tmq/tmq_taosx.py
+++ b/tests/system-test/7-tmq/tmq_taosx.py
@@ -45,7 +45,7 @@ class TDTestCase:
break
tdSql.execute('use db_taosx')
- tdSql.query("select * from ct3 order by c1 desc")
+ tdSql.query("select * from ct3 order by c1 desc")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 51)
tdSql.checkData(0, 4, 940)
@@ -58,17 +58,17 @@ class TDTestCase:
tdSql.query("select * from ct2")
tdSql.checkRows(0)
- tdSql.query("select * from ct0 order by c1 ")
+ tdSql.query("select * from ct0 order by c1")
tdSql.checkRows(2)
tdSql.checkData(0, 3, "a")
tdSql.checkData(1, 4, None)
- tdSql.query("select * from n1 order by ts")
+ tdSql.query("select * from n1 order by cc3 desc")
tdSql.checkRows(2)
tdSql.checkData(0, 1, "eeee")
tdSql.checkData(1, 2, 940)
- tdSql.query("select * from jt order by i desc;")
+ tdSql.query("select * from jt order by i desc")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 11)
tdSql.checkData(0, 2, None)
@@ -85,7 +85,5 @@ class TDTestCase:
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
-event = threading.Event()
-
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index 458951b815..7a3ad1070c 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -426,4 +426,5 @@ python3 ./test.py -f 2-query/function_null.py -Q 3
python3 ./test.py -f 2-query/count_partition.py -Q 3
python3 ./test.py -f 2-query/max_partition.py -Q 3
python3 ./test.py -f 2-query/last_row.py -Q 3
-python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
\ No newline at end of file
+python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
+python3 ./test.py -f 2-query/sml.py -Q 3
\ No newline at end of file
diff --git a/tests/system-test/simpletest.bat b/tests/system-test/simpletest.bat
index e33fe0d538..656828aa1e 100644
--- a/tests/system-test/simpletest.bat
+++ b/tests/system-test/simpletest.bat
@@ -6,7 +6,7 @@ python3 .\test.py -f 0-others\telemetry.py
python3 .\test.py -f 0-others\taosdMonitor.py
python3 .\test.py -f 0-others\udfTest.py
python3 .\test.py -f 0-others\udf_create.py
-@REM python3 .\test.py -f 0-others\udf_restart_taosd.py
+python3 .\test.py -f 0-others\udf_restart_taosd.py
@REM python3 .\test.py -f 0-others\cachelast.py
@REM python3 .\test.py -f 0-others\user_control.py
diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt
index 605eef9be3..31331b5265 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/tests/test/c/CMakeLists.txt
@@ -2,6 +2,7 @@ add_executable(tmq_demo tmqDemo.c)
add_executable(tmq_sim tmqSim.c)
add_executable(create_table createTable.c)
add_executable(tmq_taosx_ci tmq_taosx_ci.c)
+add_executable(sml_test sml_test.c)
target_link_libraries(
create_table
PUBLIC taos_static
@@ -31,6 +32,14 @@ target_link_libraries(
PUBLIC os
)
+target_link_libraries(
+ sml_test
+ PUBLIC taos_static
+ PUBLIC util
+ PUBLIC common
+ PUBLIC os
+)
+
add_executable(sdbDump sdbDump.c)
target_link_libraries(
sdbDump
diff --git a/tests/test/c/sml_test.c b/tests/test/c/sml_test.c
new file mode 100644
index 0000000000..50249a5c56
--- /dev/null
+++ b/tests/test/c/sml_test.c
@@ -0,0 +1,1133 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include "taos.h"
+#include "types.h"
+
+int smlProcess_influx_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 "
+ "load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation="
+ "124,velocity=0,heading=221,grade=0 1451606401000000000",
+ "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 "
+ "load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation="
+ "124,velocity=0,heading=221,grade=0,fuel_consumption=25 1451607402000000000",
+ "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 "
+ "load_capacity=1500,fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation="
+ "124,heading=221,grade=0,fuel_consumption=25 1451608403000000000",
+ "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 "
+ "fuel_capacity=150,nominal_fuel_consumption=12,latitude=52.31854,longitude=4.72037,elevation=124,velocity=0,"
+ "heading=221,grade=0,fuel_consumption=25 1451609404000000000",
+ "readings,name=truck_0,fleet=South,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 "
+ "1451619405000000000",
+ "readings,name=truck_1,fleet=South,driver=Albert,model=F-150,device_version=v1.5 "
+ "load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=72.45258,longitude=68.83761,elevation="
+ "255,velocity=0,heading=181,grade=0,fuel_consumption=25 1451606406000000000",
+ "readings,name=truck_2,driver=Derek,model=F-150,device_version=v1.5 "
+ "load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation="
+ "428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451606407000000000",
+ "readings,name=truck_2,fleet=North,driver=Derek,model=F-150 "
+ "load_capacity=2000,fuel_capacity=200,nominal_fuel_consumption=15,latitude=24.5208,longitude=28.09377,elevation="
+ "428,velocity=0,heading=304,grade=0,fuel_consumption=25 1451609408000000000",
+ "readings,fleet=South,name=truck_0,driver=Trish,model=H-2,device_version=v2.3 fuel_consumption=25,grade=0 "
+ "1451629409000000000",
+ "stable,t1=t1,t2=t2,t3=t3 c1=1,c2=2,c3=\"kk\",c4=4 1451629501000000000",
+ "stable,t2=t2,t1=t1,t3=t3 c1=1,c3=\"\",c4=4 1451629602000000000",
+ };
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, 0);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+int smlProcess_telnet_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {"sys.if.bytes.out 1479496100 1.3E0 host=web01 interface=eth0",
+ "sys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01 ",
+ "sys.if.bytes.out 1479496102 1.3E3 network=tcp",
+ " sys.procs.running 1479496100 42 host=web01 "};
+
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_TELNET_PROTOCOL,
+ TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+int smlProcess_json1_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "["
+ " {"
+ " \"metric\": \"sys.cpu.nice\","
+ " \"timestamp\": 0,"
+ " \"value\": 18,"
+ " \"tags\": {"
+ " \"host\": \"web01\","
+ " \"id\": \"t1\","
+ " \"dc\": \"lga\""
+ " }"
+ " },"
+ " {"
+ " \"metric\": \"sys.cpu.nice\","
+ " \"timestamp\": 1346846400,"
+ " \"value\": 9,"
+ " \"tags\": {"
+ " \"host\": \"web02\","
+ " \"dc\": \"lga\""
+ " }"
+ " }"
+ "]"};
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL,
+ TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+int smlProcess_json2_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "{"
+ " \"metric\": \"meter_current0\","
+ " \"timestamp\": {"
+ " \"value\" : 1346846400,"
+ " \"type\" : \"s\""
+ " },"
+ " \"value\": {"
+ " \"value\" : 10.3,"
+ " \"type\" : \"i64\""
+ " },"
+ " \"tags\": {"
+ " \"groupid\": { "
+ " \"value\" : 2,"
+ " \"type\" : \"bigint\""
+ " },"
+ " \"location\": { "
+ " \"value\" : \"北京\","
+ " \"type\" : \"binary\""
+ " },"
+ " \"id\": \"d1001\""
+ " }"
+ "}"};
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL,
+ TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+int smlProcess_json3_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "{"
+ " \"metric\": \"meter_current1\","
+ " \"timestamp\": {"
+ " \"value\" : 1346846400,"
+ " \"type\" : \"s\""
+ " },"
+ " \"value\": {"
+ " \"value\" : 10.3,"
+ " \"type\" : \"i64\""
+ " },"
+ " \"tags\": {"
+ " \"t1\": { "
+ " \"value\" : 2,"
+ " \"type\" : \"bigint\""
+ " },"
+ " \"t2\": { "
+ " \"value\" : 2,"
+ " \"type\" : \"int\""
+ " },"
+ " \"t3\": { "
+ " \"value\" : 2,"
+ " \"type\" : \"i16\""
+ " },"
+ " \"t4\": { "
+ " \"value\" : 2,"
+ " \"type\" : \"i8\""
+ " },"
+ " \"t5\": { "
+ " \"value\" : 2,"
+ " \"type\" : \"f32\""
+ " },"
+ " \"t6\": { "
+ " \"value\" : 2,"
+ " \"type\" : \"double\""
+ " },"
+ " \"t7\": { "
+ " \"value\" : \"8323\","
+ " \"type\" : \"binary\""
+ " },"
+ " \"t8\": { "
+ " \"value\" : \"北京\","
+ " \"type\" : \"nchar\""
+ " },"
+ " \"t9\": { "
+ " \"value\" : true,"
+ " \"type\" : \"bool\""
+ " },"
+ " \"id\": \"d1001\""
+ " }"
+ "}"};
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL,
+ TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+int smlProcess_json4_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "{"
+ " \"metric\": \"meter_current2\","
+ " \"timestamp\": {"
+ " \"value\" : 1346846500000,"
+ " \"type\" : \"ms\""
+ " },"
+ " \"value\": \"ni\","
+ " \"tags\": {"
+ " \"t1\": { "
+ " \"value\" : 20,"
+ " \"type\" : \"i64\""
+ " },"
+ " \"t2\": { "
+ " \"value\" : 25,"
+ " \"type\" : \"i32\""
+ " },"
+ " \"t3\": { "
+ " \"value\" : 2,"
+ " \"type\" : \"smallint\""
+ " },"
+ " \"t4\": { "
+ " \"value\" : 2,"
+ " \"type\" : \"tinyint\""
+ " },"
+ " \"t5\": { "
+ " \"value\" : 2,"
+ " \"type\" : \"float\""
+ " },"
+ " \"t6\": { "
+ " \"value\" : 0.2,"
+ " \"type\" : \"f64\""
+ " },"
+ " \"t7\": \"nsj\","
+ " \"t8\": { "
+ " \"value\" : \"北京\","
+ " \"type\" : \"nchar\""
+ " },"
+ " \"t9\": false,"
+ " \"id\": \"d1001\""
+ " }"
+ "}"};
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL,
+ TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+int sml_TD15662_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db precision 'ns' schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "hetrey c0=f,c1=127i8 1626006833639",
+ "hetrey,t1=r c0=f,c1=127i8 1626006833640",
+ };
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL,
+ TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+int sml_TD15742_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "test_ms,t0=t c0=f 1626006833641",
+ };
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL,
+ TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+int sml_16384_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "qelhxo,id=pnnqhsa,t0=t,t1=127i8 c0=t,c1=127i8 1626006833639000000",
+ };
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ pRes = taos_schemaless_insert(taos, (char **)sql, 1, TSDB_SML_LINE_PROTOCOL, 0);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ if(code) return code;
+
+ const char *sql1[] = {
+ "qelhxo,id=pnnqhsa,t0=t,t1=127i8 c0=f,c1=127i8,c11=L\"ncharColValue\",c10=t 1626006833639000000",
+ };
+ pRes = taos_schemaless_insert(taos, (char **)sql1, 1, TSDB_SML_LINE_PROTOCOL, 0);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+int sml_oom_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ //"test_ms,t0=t c0=f 1626006833641",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"pgxbrbga\",t8=L\"ncharTagValue\" "
+ "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"gviggpmi\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"cexkarjn\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"rzwwuoxu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"xphrlkey\",t8=L\"ncharTagValue\" "
+ "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"llsawebj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"jwpkipff\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"euzzhcvu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"jumhnsvw\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"fnetgdhj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"vrmmpgqe\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"lnpfjapr\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"gvbhmsfr\",t8=L\"ncharTagValue\" "
+ "c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"kydxrxwc\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"pfyarryq\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"uxptotap\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"prolhudh\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"ttxaxnac\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"dfgvmjmz\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"bloextkn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"dvjxwzsi\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"aigjomaf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"refbidtf\",t8=L\"ncharTagValue\" "
+ "c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"vuanlfpz\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"nbpajxkx\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"ktzzauxh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"prcwdjct\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"vmbhvjtp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"liuddtuz\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"pddsktow\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"algldlvl\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"mlmnjgdl\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"oiynpcog\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"wmynbagb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"asvyulrm\",t8=L\"ncharTagValue\" "
+ "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"ohaacrkp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"ytyejhiq\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"bbznuerb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"lpebcibw\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"xmqrbafv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"lnmwpdne\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"jpcsjqun\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"mmxqmavz\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"hhsbgaow\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"uwogyuud\",t8=L\"ncharTagValue\" "
+ "c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"ytxpaxnk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"wouwdvtt\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"iitwikkh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"lgyzuyaq\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"bdtiigxi\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"qpnsvdhw\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"pjxihgvu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"ksxkfetn\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"ocukufqs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"qzerxmpe\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"qwcfdyxs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"jldrpmmd\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"lucxlfzc\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"rcewrvya\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"dknvaphs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"nxtxgzdr\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"mbvuugwz\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"uikakffu\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"mwmtqsma\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"bfcxrrpa\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"ksajygdj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"vmhhszyv\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"urwjgvut\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"jrvytcxy\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"evqkzygh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"zitdznhg\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"tpqekrxa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"yrrbgjtk\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"bnphiuyq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"huknehjn\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"iudbxfke\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"fjmolwbn\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"gukzgcjs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"bjvdtlgq\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"phxnesxh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"qgpgckvc\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"yechqtfa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"pbouxywy\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"kxtuojyo\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"txaniwlj\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"fixgufrj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"okzvalwq\",t8=L\"ncharTagValue\" "
+ "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"iitawgbn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"gayvmird\",t8=L\"ncharTagValue\" "
+ "c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"dprkfjph\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"kmuccshq\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"vkslsdsd\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"dukccdqk\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"leztxmqf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"kltixbwz\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"xqhkweef\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"idxsimvz\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"vbruvcpk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"uxandqkd\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"dsiosysh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"kxuyanpp\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"wkrktags\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"yvizzpiv\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"ddnefben\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"novmfmbc\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"fnusxsfu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"ouerfjap\",t8=L\"ncharTagValue\" "
+ "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"sigognkf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"slvzhede\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"bknerect\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"tmhcdfjb\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"hpnoanpp\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"okmhelnc\",t8=L\"ncharTagValue\" "
+ "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"xcernjin\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"jdmiismg\",t8=L\"ncharTagValue\" "
+ "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"tmnqozrf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"zgwrftkx\",t8=L\"ncharTagValue\" "
+ "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"zyamlwwh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"nuedqcro\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"lpsvyqaa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"mneitsul\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"vpleinwb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"njxuaedy\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"sdgxpqmu\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"yjirrebp\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"ikqndzfj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"ghnfdxhr\",t8=L\"ncharTagValue\" "
+ "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"hrwczpvo\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"nattumpb\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"zoyfzazn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"rdwemofy\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"phkgsjeg\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"pyhvvjrt\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"zfslyton\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"bxwjzeri\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"uovzzgjv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"cfjmacvr\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"jefqgzqx\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"njrksxmr\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"mhvabvgn\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"kfekjltr\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"lexfaaby\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"zbblsmwq\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"oqcombkx\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"rcdmhzyw\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"otksuean\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"itbdvowq\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"tswtmhex\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"xoukkzid\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"guangmpq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"rayxzuky\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"lspwucrv\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"pdprzzkf\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"sddqrtza\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"kabndgkx\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"aglnqqxs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"fiwpzmdr\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"hxctooen\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"pckjpwyh\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"ivmvsbai\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"eljdclst\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"rwgdctie\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"zlnthxoz\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"ljtxelle\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"llfggdpy\",t8=L\"ncharTagValue\" "
+ "c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"tvnridze\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"hxjpgube\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"zmldmquq\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"bggqwcoj\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"drksfofm\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"jcsixens\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"cdwnwhaf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"nngpumuq\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"hylgooci\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"cozeyjys\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"lcgpfcsa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"qdtzhtyd\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"txpubynb\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"gbslzbtu\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"buihcpcl\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"ayqezaiq\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"zgkgtilj\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"bcjopqif\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"mfzxiaqt\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"xmnlqxoj\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"reyiklyf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"xssuomhk\",t8=L\"ncharTagValue\" "
+ "c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"liazkjll\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"nigjlblo\",t8=L\"ncharTagValue\" "
+ "c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"vmojyznk\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"dotkbvrz\",t8=L\"ncharTagValue\" "
+ "c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"kuwdyydw\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"slsfqydw\",t8=L\"ncharTagValue\" "
+ "c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"zyironhd\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"pktwfhzi\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"xybavsvh\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"pyrxemvx\",t8=L\"ncharTagValue\" "
+ "c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"tlfihwjs\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ogirwqci,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,"
+ "t7=\"neumakmg\",t8=L\"ncharTagValue\" "
+ "c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="
+ "\"wxqingoa\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ };
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, 0);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+// Regression test for TD-16368: schemaless insert of an OpenTSDB-JSON payload.
+// The single sql[] entry is one JSON array holding 9 data points across three
+// super tables (st123456, stb_name, stf567890), using microsecond timestamps
+// and per-field typed objects ({"value":..., "type":...}) that mix plain
+// scalars with explicit double/bigint/binary/nchar types.
+// Returns the taos error code of the insert (0 on success).
+int sml_16368_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "[{\"metric\": \"st123456\", \"timestamp\": {\"value\": 1626006833639000, \"type\": \"us\"}, \"value\": 1, "
+ "\"tags\": {\"t1\": 3, \"t2\": {\"value\": 4, \"type\": \"double\"}, \"t3\": {\"value\": \"t3\", \"type\": "
+ "\"binary\"}}},"
+ "{\"metric\": \"st123456\", \"timestamp\": {\"value\": 1626006833739000, \"type\": \"us\"}, \"value\": 2, "
+ "\"tags\": {\"t1\": {\"value\": 4, \"type\": \"double\"}, \"t3\": {\"value\": \"t4\", \"type\": \"binary\"}, "
+ "\"t2\": {\"value\": 5, \"type\": \"double\"}, \"t4\": {\"value\": 5, \"type\": \"double\"}}},"
+ "{\"metric\": \"stb_name\", \"timestamp\": {\"value\": 1626006833639100, \"type\": \"us\"}, \"value\": 3, "
+ "\"tags\": {\"t2\": {\"value\": 5, \"type\": \"double\"}, \"t3\": {\"value\": \"ste\", \"type\": \"nchar\"}}},"
+ "{\"metric\": \"stf567890\", \"timestamp\": {\"value\": 1626006833639200, \"type\": \"us\"}, \"value\": 4, "
+ "\"tags\": {\"t1\": {\"value\": 4, \"type\": \"bigint\"}, \"t3\": {\"value\": \"t4\", \"type\": \"binary\"}, "
+ "\"t2\": {\"value\": 5, \"type\": \"double\"}, \"t4\": {\"value\": 5, \"type\": \"double\"}}},"
+ "{\"metric\": \"st123456\", \"timestamp\": {\"value\": 1626006833639300, \"type\": \"us\"}, \"value\": "
+ "{\"value\": 5, \"type\": \"double\"}, \"tags\": {\"t1\": {\"value\": 4, \"type\": \"double\"}, \"t2\": 5.0, "
+ "\"t3\": {\"value\": \"t4\", \"type\": \"binary\"}}},"
+ "{\"metric\": \"stb_name\", \"timestamp\": {\"value\": 1626006833639400, \"type\": \"us\"}, \"value\": "
+ "{\"value\": 6, \"type\": \"double\"}, \"tags\": {\"t2\": 5.0, \"t3\": {\"value\": \"ste2\", \"type\": "
+ "\"nchar\"}}},"
+ "{\"metric\": \"stb_name\", \"timestamp\": {\"value\": 1626006834639400, \"type\": \"us\"}, \"value\": "
+ "{\"value\": 7, \"type\": \"double\"}, \"tags\": {\"t2\": {\"value\": 5.0, \"type\": \"double\"}, \"t3\": "
+ "{\"value\": \"ste2\", \"type\": \"nchar\"}}},"
+ "{\"metric\": \"st123456\", \"timestamp\": {\"value\": 1626006833839006, \"type\": \"us\"}, \"value\": "
+ "{\"value\": 8, \"type\": \"double\"}, \"tags\": {\"t1\": {\"value\": 4, \"type\": \"double\"}, \"t3\": "
+ "{\"value\": \"t4\", \"type\": \"binary\"}, \"t2\": {\"value\": 5, \"type\": \"double\"}, \"t4\": {\"value\": 5, "
+ "\"type\": \"double\"}}},"
+ "{\"metric\": \"st123456\", \"timestamp\": {\"value\": 1626006833939007, \"type\": \"us\"}, \"value\": "
+ "{\"value\": 9, \"type\": \"double\"}, \"tags\": {\"t1\": 4, \"t3\": {\"value\": \"t4\", \"type\": \"binary\"}, "
+ "\"t2\": {\"value\": 5, \"type\": \"double\"}, \"t4\": {\"value\": 5, \"type\": \"double\"}}}]"};
+ // numLines is deliberately 0: the whole payload is one JSON document, so the
+ // element count should not matter for TSDB_SML_JSON_PROTOCOL.
+ // NOTE(review): presumably this zero count is exactly what TD-16368
+ // exercised — confirm against the issue tracker.
+ pRes = taos_schemaless_insert(taos, (char **)sql, 0, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_MICRO_SECONDS);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+// Line-protocol test: five rows targeting the SAME child table (the explicit
+// "id=qmtcvgd" tag pins the child-table name) with an identical tag set but
+// distinct nanosecond timestamps spaced one second apart and varying c1/c7
+// column values.
+// NOTE(review): despite the "dup_time" name, all five timestamps here are
+// distinct — presumably the duplicate-timestamp case is the commented-out
+// sample line below; confirm the original intent.
+// Returns the taos error code of the insert (0 on success).
+int sml_dup_time_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ const char *sql[] = {//"test_ms,t0=t c0=f 1626006833641",
+ "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11."
+ "12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" "
+ "c0=false,c1=1i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22."
+ "123456789f64,c7=\"xcxvwjvf\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000",
+ "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11."
+ "12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" "
+ "c0=T,c1=2i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22."
+ "123456789f64,c7=\"fixrzcuq\",c8=L\"ncharColValue\",c9=7u64 1626006834639000000",
+ "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11."
+ "12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" "
+ "c0=t,c1=3i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22."
+ "123456789f64,c7=\"iupzdqub\",c8=L\"ncharColValue\",c9=7u64 1626006835639000000",
+ "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11."
+ "12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" "
+ "c0=t,c1=4i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22."
+ "123456789f64,c7=\"yvvtzzof\",c8=L\"ncharColValue\",c9=7u64 1626006836639000000",
+ "ubzlsr,id=qmtcvgd,t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11."
+ "12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" "
+ "c0=t,c1=5i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22."
+ "123456789f64,c7=\"vbxpilkj\",c8=L\"ncharColValue\",c9=7u64 1626006837639000000"};
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ // Last argument 0 = no explicit timestamp precision; each line carries a
+ // full-width timestamp literal (1626006833639000000 etc.) in the payload.
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, 0);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+// Regression test for TD-16960: an OpenTSDB-JSON payload assembled from many
+// tiny adjacent string literals (compile-time concatenation, including empty
+// "" fragments). The resulting single sql[] entry is one JSON array of five
+// data points for super table stb0, all sharing the same child table
+// ("id": "stb00_0") and tag values t0..t5, with consecutive millisecond
+// timestamps 1349020800000..1349020800004 and differing int values.
+// Returns the taos error code of the insert (0 on success).
+int sml_16960_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "["
+ "{"
+ "\"timestamp\":"
+ ""
+ "{ \"value\": 1349020800000, \"type\": \"ms\" }"
+ ","
+ "\"value\":"
+ ""
+ "{ \"value\": 830525384, \"type\": \"int\" }"
+ ","
+ "\"tags\": {"
+ "\"id\": \"stb00_0\","
+ "\"t0\":"
+ ""
+ "{ \"value\": 83972721, \"type\": \"int\" }"
+ ","
+ "\"t1\":"
+ ""
+ "{ \"value\": 539147525, \"type\": \"int\" }"
+ ","
+ "\"t2\":"
+ ""
+ "{ \"value\": 618258572, \"type\": \"int\" }"
+ ","
+ "\"t3\":"
+ ""
+ "{ \"value\": -10536201, \"type\": \"int\" }"
+ ","
+ "\"t4\":"
+ ""
+ "{ \"value\": 349227409, \"type\": \"int\" }"
+ ","
+ "\"t5\":"
+ ""
+ "{ \"value\": 249347042, \"type\": \"int\" }"
+ "},"
+ "\"metric\": \"stb0\""
+ "},"
+ "{"
+ "\"timestamp\":"
+ ""
+ "{ \"value\": 1349020800001, \"type\": \"ms\" }"
+ ","
+ "\"value\":"
+ ""
+ "{ \"value\": -588348364, \"type\": \"int\" }"
+ ","
+ "\"tags\": {"
+ "\"id\": \"stb00_0\","
+ "\"t0\":"
+ ""
+ "{ \"value\": 83972721, \"type\": \"int\" }"
+ ","
+ "\"t1\":"
+ ""
+ "{ \"value\": 539147525, \"type\": \"int\" }"
+ ","
+ "\"t2\":"
+ ""
+ "{ \"value\": 618258572, \"type\": \"int\" }"
+ ","
+ "\"t3\":"
+ ""
+ "{ \"value\": -10536201, \"type\": \"int\" }"
+ ","
+ "\"t4\":"
+ ""
+ "{ \"value\": 349227409, \"type\": \"int\" }"
+ ","
+ "\"t5\":"
+ ""
+ "{ \"value\": 249347042, \"type\": \"int\" }"
+ "},"
+ "\"metric\": \"stb0\""
+ "},"
+ "{"
+ "\"timestamp\":"
+ ""
+ "{ \"value\": 1349020800002, \"type\": \"ms\" }"
+ ","
+ "\"value\":"
+ ""
+ "{ \"value\": -370310823, \"type\": \"int\" }"
+ ","
+ "\"tags\": {"
+ "\"id\": \"stb00_0\","
+ "\"t0\":"
+ ""
+ "{ \"value\": 83972721, \"type\": \"int\" }"
+ ","
+ "\"t1\":"
+ ""
+ "{ \"value\": 539147525, \"type\": \"int\" }"
+ ","
+ "\"t2\":"
+ ""
+ "{ \"value\": 618258572, \"type\": \"int\" }"
+ ","
+ "\"t3\":"
+ ""
+ "{ \"value\": -10536201, \"type\": \"int\" }"
+ ","
+ "\"t4\":"
+ ""
+ "{ \"value\": 349227409, \"type\": \"int\" }"
+ ","
+ "\"t5\":"
+ ""
+ "{ \"value\": 249347042, \"type\": \"int\" }"
+ "},"
+ "\"metric\": \"stb0\""
+ "},"
+ "{"
+ "\"timestamp\":"
+ ""
+ "{ \"value\": 1349020800003, \"type\": \"ms\" }"
+ ","
+ "\"value\":"
+ ""
+ "{ \"value\": -811250191, \"type\": \"int\" }"
+ ","
+ "\"tags\": {"
+ "\"id\": \"stb00_0\","
+ "\"t0\":"
+ ""
+ "{ \"value\": 83972721, \"type\": \"int\" }"
+ ","
+ "\"t1\":"
+ ""
+ "{ \"value\": 539147525, \"type\": \"int\" }"
+ ","
+ "\"t2\":"
+ ""
+ "{ \"value\": 618258572, \"type\": \"int\" }"
+ ","
+ "\"t3\":"
+ ""
+ "{ \"value\": -10536201, \"type\": \"int\" }"
+ ","
+ "\"t4\":"
+ ""
+ "{ \"value\": 349227409, \"type\": \"int\" }"
+ ","
+ "\"t5\":"
+ ""
+ "{ \"value\": 249347042, \"type\": \"int\" }"
+ "},"
+ "\"metric\": \"stb0\""
+ "},"
+ "{"
+ "\"timestamp\":"
+ ""
+ "{ \"value\": 1349020800004, \"type\": \"ms\" }"
+ ","
+ "\"value\":"
+ ""
+ "{ \"value\": -330340558, \"type\": \"int\" }"
+ ","
+ "\"tags\": {"
+ "\"id\": \"stb00_0\","
+ "\"t0\":"
+ ""
+ "{ \"value\": 83972721, \"type\": \"int\" }"
+ ","
+ "\"t1\":"
+ ""
+ "{ \"value\": 539147525, \"type\": \"int\" }"
+ ","
+ "\"t2\":"
+ ""
+ "{ \"value\": 618258572, \"type\": \"int\" }"
+ ","
+ "\"t3\":"
+ ""
+ "{ \"value\": -10536201, \"type\": \"int\" }"
+ ","
+ "\"t4\":"
+ ""
+ "{ \"value\": 349227409, \"type\": \"int\" }"
+ ","
+ "\"t5\":"
+ ""
+ "{ \"value\": 249347042, \"type\": \"int\" }"
+ "},"
+ "\"metric\": \"stb0\""
+ "}"
+ "]"};
+
+ // sizeof(sql)/sizeof(sql[0]) == 1: the entire array above concatenates into
+ // a single payload string; the precision flag declares ms timestamps.
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_JSON_PROTOCOL,
+ TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ return code;
+}
+
+// Schema-extension test: the first insert creates super table "macylr" with
+// tags t0..t8 and columns c0..c9; the second insert targets the same
+// timestamp but adds NEW tags (t11, t10) and NEW columns (c11, c10) — listed
+// out of declaration order and with an explicit child-table id — so the
+// schemaless engine must extend the existing schema rather than fail.
+// Returns the taos error code (the second insert's code if the first passed).
+int sml_add_tag_col_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "macylr,t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000"
+ };
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, 0);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ taos_free_result(pRes);
+ if (code) return code;  // no point testing extension if the base insert failed
+
+ // Same super table, now with extra tags/columns that did not exist above.
+ const char *sql1[] = {
+ "macylr,id=macylr_17875_1804,t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t11=127i8,t10=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\",c10=f 1626006833639000000"
+ };
+
+ pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, 0);
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ code = taos_errno(pRes);
+ taos_free_result(pRes);
+
+ return code;
+}
+
+// Driver: run every schemaless (sml) regression test in sequence, stopping at
+// the first failure and propagating its taos error code as the process exit
+// status (0 = all tests passed). Command-line arguments are ignored.
+int main(int argc, char *argv[]) {
+ int ret = 0;
+ ret = smlProcess_influx_Test();
+ if(ret) return ret;
+ ret = smlProcess_telnet_Test();
+ if(ret) return ret;
+ ret = smlProcess_json1_Test();
+ if(ret) return ret;
+ ret = smlProcess_json2_Test();
+ if(ret) return ret;
+ ret = smlProcess_json3_Test();
+ if(ret) return ret;
+ ret = smlProcess_json4_Test();
+ if(ret) return ret;
+ ret = sml_TD15662_Test();
+ if(ret) return ret;
+ ret = sml_TD15742_Test();
+ if(ret) return ret;
+ ret = sml_16384_Test();
+ if(ret) return ret;
+ ret = sml_oom_Test();
+ if(ret) return ret;
+ ret = sml_16368_Test();
+ if(ret) return ret;
+ ret = sml_dup_time_Test();
+ if(ret) return ret;
+ ret = sml_16960_Test();
+ if(ret) return ret;
+ // Last test: its code is returned directly with no trailing guard needed.
+ ret = sml_add_tag_col_Test();
+ return ret;
+}
diff --git a/tools/taos-tools b/tools/taos-tools
deleted file mode 160000
index 817cb6ac43..0000000000
--- a/tools/taos-tools
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 817cb6ac431ed8ae4c843872cdfc8c201c1e1894
diff --git a/tools/taosadapter b/tools/taosadapter
deleted file mode 160000
index df8678f070..0000000000
--- a/tools/taosadapter
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit df8678f070e3f707faf59baebec90065f6e1268b
diff --git a/tools/taosws-rs b/tools/taosws-rs
deleted file mode 160000
index 9de599dc52..0000000000
--- a/tools/taosws-rs
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 9de599dc5293e9c90bc00bc4a03f8b91ba756bc3