diff --git a/cmake/cmake.define b/cmake/cmake.define
index 5637c666b9..8d71870e7d 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -46,7 +46,7 @@ ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
- SET(COMMON_FLAGS "/w /D_WIN32 /Zi")
+ SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
diff --git a/docs-cn/14-reference/03-connector/cpp.mdx b/docs-cn/14-reference/03-connector/cpp.mdx
index aba1d6c717..aecf9fde12 100644
--- a/docs-cn/14-reference/03-connector/cpp.mdx
+++ b/docs-cn/14-reference/03-connector/cpp.mdx
@@ -114,7 +114,6 @@ TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#
订阅和消费
```c
-{{#include examples/c/subscribe.c}}
```
diff --git a/docs-cn/20-third-party/01-grafana.mdx b/docs-cn/20-third-party/01-grafana.mdx
index 328bd6bb45..40b5c0ff4f 100644
--- a/docs-cn/20-third-party/01-grafana.mdx
+++ b/docs-cn/20-third-party/01-grafana.mdx
@@ -18,21 +18,22 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/
## 配置 Grafana
-TDengine 的 Grafana 插件托管在 GitHub,可从 下载,当前最新版本为 3.1.4。
-
-推荐使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件安装。
+使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件[安装](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation)。
```bash
-sudo -u grafana grafana-cli \
- --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.7/tdengine-datasource-3.1.7.zip \
- plugins install tdengine-datasource
+grafana-cli plugins install tdengine-datasource
+# with sudo
+sudo -u grafana grafana-cli plugins install tdengine-datasource
```
-或者下载到本地并解压到 Grafana 插件目录。
+或者从 [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) 或 [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) 下载 .zip 文件到本地并解压到 Grafana 插件目录。命令行下载示例如下:
```bash
-GF_VERSION=3.1.7
+GF_VERSION=3.2.2
+# from GitHub
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
+# from Grafana
+wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tdengine-datasource/versions/$GF_VERSION/download
```
以 CentOS 7.2 操作系统为例,将插件包解压到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。
@@ -41,28 +42,17 @@ wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/td
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```
-:::note
-3.1.6 和更早版本未签名,会在 Grafana 7.3+ / 8.x 版本签名检查时失败导致无法加载插件,需要在 grafana.ini 文件中修改配置如下:
-
-```ini
-[plugins]
-allow_loading_unsigned_plugins = tdengine-datasource
-```
-
-:::
-
-在 Docker 环境下,可以使用如下的环境变量设置自动安装并设置 TDengine 插件:
+如果 Grafana 在 Docker 环境下运行,可以使用如下的环境变量设置自动安装 TDengine 数据源插件:
```bash
-GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.4/tdengine-datasource-3.1.4.zip;tdengine-datasource
-GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
+GF_INSTALL_PLUGINS=tdengine-datasource
```
## 使用 Grafana
### 配置数据源
-用户可以直接通过 http://localhost:3000 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示:
+用户可以直接通过 的网址,登录 Grafana 服务器(用户名/密码:admin/admin),通过左侧 `Configuration -> Data Sources` 可以添加数据源,如下图所示:

@@ -74,7 +64,7 @@ GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource

-- Host: TDengine 集群中提供 REST 服务 (在 2.4 之前由 taosd 提供, 从 2.4 开始由 taosAdapter 提供)的组件所在服务器的 IP 地址与 TDengine REST 服务的端口号(6041),默认 http://localhost:6041。
+- Host: TDengine 集群中提供 REST 服务 (在 2.4 之前由 taosd 提供, 从 2.4 开始由 taosAdapter 提供)的组件所在服务器的 IP 地址与 TDengine REST 服务的端口号(6041),默认 。
- User:TDengine 用户名。
- Password:TDengine 用户密码。
diff --git a/docs-en/14-reference/03-connector/cpp.mdx b/docs-en/14-reference/03-connector/cpp.mdx
index d13a74384c..d549413012 100644
--- a/docs-en/14-reference/03-connector/cpp.mdx
+++ b/docs-en/14-reference/03-connector/cpp.mdx
@@ -114,7 +114,6 @@ This section shows sample code for standard access methods to TDengine clusters
Subscribe and consume
```c
-{{#include examples/c/subscribe.c}}
```
diff --git a/docs-en/20-third-party/01-grafana.mdx b/docs-en/20-third-party/01-grafana.mdx
index b3cab62710..1a84e02c66 100644
--- a/docs-en/20-third-party/01-grafana.mdx
+++ b/docs-en/20-third-party/01-grafana.mdx
@@ -9,7 +9,8 @@ You can learn more about using the TDengine plugin on [GitHub](https://github.co
## Prerequisites
-In order for Grafana to add the TDengine data source successfully, the following preparation is required:
+In order for Grafana to add the TDengine data source successfully, the following preparations are required:
+
1. The TDengine cluster is deployed and functioning properly
2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details.
@@ -19,41 +20,34 @@ TDengine currently supports Grafana versions 7.0 and above. Users can go to the
## Configuring Grafana
-You can download The Grafana plugin for TDengine from . The current latest version is 3.1.4.
-
-Recommend using the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
+Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) using the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to install the plugin.
```bash
-sudo -u grafana grafana-cli \
- --pluginUrl https://github.com/taosdata/grafanaplugin/releases/download/v3.1.4/tdengine-datasource-3.1.4.zip \
- plugins install tdengine-datasource
+grafana-cli plugins install tdengine-datasource
+# with sudo
+sudo -u grafana grafana-cli plugins install tdengine-datasource
```
-Or download it locally and extract it to the Grafana plugin directory.
+Alternatively, you can manually download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and unpack it into your Grafana plugins directory.
```bash
-GF_VERSION=3.1.4
+GF_VERSION=3.2.2
+# from GitHub
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
+# from Grafana
+wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tdengine-datasource/versions/$GF_VERSION/download
```
-In CentOS 7.2 for example, extract the plugin package to /var/lib/grafana/plugins directory, and restart grafana.
+Taking CentOS 7.2 as an example, extract the plugin package to the /var/lib/grafana/plugins directory and restart Grafana.
```bash
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```
-Grafana versions 7.3+ / 8.x do signature checks on plugins, so you also need to add the following line to the grafana.ini file to use the plugin correctly.
-
-```ini
-[plugins]
-allow_loading_unsigned_plugins = tdengine-datasource
-```
-
-The TDengine plugin can be automatically installed and set up using the following environment variable settings in a Docker environment.
+If Grafana is running in a Docker environment, the TDengine plugin can be automatically installed and set up using the following environment variable settings:
```bash
-GF_INSTALL_PLUGINS=https://github.com/taosdata/grafanaplugin/releases/download/v3.1.4/tdengine-datasource-3.1.4.zip;tdengine- datasource
-GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=tdengine-datasource
+GF_INSTALL_PLUGINS=tdengine-datasource
```
## Using Grafana
diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt
index eff492945e..4a9007acec 100644
--- a/examples/c/CMakeLists.txt
+++ b/examples/c/CMakeLists.txt
@@ -13,7 +13,7 @@ IF (TD_LINUX)
#TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
add_executable(tmq "")
- add_executable(tstream "")
+ add_executable(stream_demo "")
add_executable(demoapi "")
target_sources(tmq
@@ -21,9 +21,9 @@ IF (TD_LINUX)
"tmq.c"
)
- target_sources(tstream
+ target_sources(stream_demo
PRIVATE
- "tstream.c"
+ "stream_demo.c"
)
target_sources(demoapi
@@ -35,7 +35,7 @@ IF (TD_LINUX)
taos_static
)
- target_link_libraries(tstream
+ target_link_libraries(stream_demo
taos_static
)
@@ -48,7 +48,7 @@ IF (TD_LINUX)
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
- target_include_directories(tstream
+ target_include_directories(stream_demo
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
@@ -59,7 +59,7 @@ IF (TD_LINUX)
)
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
- SET_TARGET_PROPERTIES(tstream PROPERTIES OUTPUT_NAME tstream)
+ SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi)
ENDIF ()
IF (TD_DARWIN)
diff --git a/examples/c/tstream.c b/examples/c/stream_demo.c
similarity index 100%
rename from examples/c/tstream.c
rename to examples/c/stream_demo.c
diff --git a/examples/c/subscribe.c b/examples/c/subscribe.c
deleted file mode 100644
index 66d64d295c..0000000000
--- a/examples/c/subscribe.c
+++ /dev/null
@@ -1,263 +0,0 @@
-// sample code for TDengine subscribe/consume API
-// to compile: gcc -o subscribe subscribe.c -ltaos
-
-#include
-#include
-#include
-#include
-#include "../../../include/client/taos.h" // include TDengine header file
-
-int nTotalRows;
-
-void print_result(TAOS_RES* res, int blockFetch) {
- TAOS_ROW row = NULL;
- int num_fields = taos_num_fields(res);
- TAOS_FIELD* fields = taos_fetch_fields(res);
- int nRows = 0;
-
- if (blockFetch) {
- nRows = taos_fetch_block(res, &row);
- //for (int i = 0; i < nRows; i++) {
- // taos_print_row(buf, row + i, fields, num_fields);
- // puts(buf);
- //}
- } else {
- while ((row = taos_fetch_row(res))) {
- char buf[4096] = {0};
- taos_print_row(buf, row, fields, num_fields);
- puts(buf);
- nRows++;
- }
- }
-
- nTotalRows += nRows;
- printf("%d rows consumed.\n", nRows);
-}
-
-
-void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) {
- print_result(res, *(int*)param);
-}
-
-
-void check_row_count(int line, TAOS_RES* res, int expected) {
- int actual = 0;
- TAOS_ROW row;
- while ((row = taos_fetch_row(res))) {
- actual++;
- }
- if (actual != expected) {
- printf("line %d: row count mismatch, expected: %d, actual: %d\n", line, expected, actual);
- } else {
- printf("line %d: %d rows consumed as expected\n", line, actual);
- }
-}
-
-
-void do_query(TAOS* taos, const char* sql) {
- TAOS_RES* res = taos_query(taos, sql);
- taos_free_result(res);
-}
-
-
-void run_test(TAOS* taos) {
- do_query(taos, "drop database if exists test;");
-
- usleep(100000);
- do_query(taos, "create database test;");
- usleep(100000);
- do_query(taos, "use test;");
-
- usleep(100000);
- do_query(taos, "create table meters(ts timestamp, a int) tags(area int);");
-
- do_query(taos, "create table t0 using meters tags(0);");
- do_query(taos, "create table t1 using meters tags(1);");
- do_query(taos, "create table t2 using meters tags(2);");
- do_query(taos, "create table t3 using meters tags(3);");
- do_query(taos, "create table t4 using meters tags(4);");
- do_query(taos, "create table t5 using meters tags(5);");
- do_query(taos, "create table t6 using meters tags(6);");
- do_query(taos, "create table t7 using meters tags(7);");
- do_query(taos, "create table t8 using meters tags(8);");
- do_query(taos, "create table t9 using meters tags(9);");
-
- do_query(taos, "insert into t0 values('2020-01-01 00:00:00.000', 0);");
- do_query(taos, "insert into t0 values('2020-01-01 00:01:00.000', 0);");
- do_query(taos, "insert into t0 values('2020-01-01 00:02:00.000', 0);");
- do_query(taos, "insert into t1 values('2020-01-01 00:00:00.000', 0);");
- do_query(taos, "insert into t1 values('2020-01-01 00:01:00.000', 0);");
- do_query(taos, "insert into t1 values('2020-01-01 00:02:00.000', 0);");
- do_query(taos, "insert into t1 values('2020-01-01 00:03:00.000', 0);");
- do_query(taos, "insert into t2 values('2020-01-01 00:00:00.000', 0);");
- do_query(taos, "insert into t2 values('2020-01-01 00:01:00.000', 0);");
- do_query(taos, "insert into t2 values('2020-01-01 00:01:01.000', 0);");
- do_query(taos, "insert into t2 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t3 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t4 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t5 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t6 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t7 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t8 values('2020-01-01 00:01:02.000', 0);");
- do_query(taos, "insert into t9 values('2020-01-01 00:01:02.000', 0);");
-
- // super tables subscription
- usleep(1000000);
-
- TAOS_SUB* tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
- TAOS_RES* res = taos_consume(tsub);
- check_row_count(__LINE__, res, 18);
-
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 0);
-
- do_query(taos, "insert into t0 values('2020-01-01 00:02:00.001', 0);");
- do_query(taos, "insert into t8 values('2020-01-01 00:01:03.000', 0);");
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 2);
-
- do_query(taos, "insert into t2 values('2020-01-01 00:01:02.001', 0);");
- do_query(taos, "insert into t1 values('2020-01-01 00:03:00.001', 0);");
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 2);
-
- do_query(taos, "insert into t1 values('2020-01-01 00:03:00.002', 0);");
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 1);
-
- // keep progress information and restart subscription
- taos_unsubscribe(tsub, 1);
- do_query(taos, "insert into t0 values('2020-01-01 00:04:00.000', 0);");
- tsub = taos_subscribe(taos, 1, "test", "select * from meters;", NULL, NULL, 0);
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 24);
-
- // keep progress information and continue previous subscription
- taos_unsubscribe(tsub, 1);
- tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 0);
-
- // don't keep progress information and continue previous subscription
- taos_unsubscribe(tsub, 0);
- tsub = taos_subscribe(taos, 0, "test", "select * from meters;", NULL, NULL, 0);
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 24);
-
- // single meter subscription
-
- taos_unsubscribe(tsub, 0);
- tsub = taos_subscribe(taos, 0, "test", "select * from t0;", NULL, NULL, 0);
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 5);
-
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 0);
-
- do_query(taos, "insert into t0 values('2020-01-01 00:04:00.001', 0);");
- res = taos_consume(tsub);
- check_row_count(__LINE__, res, 1);
-
- taos_unsubscribe(tsub, 0);
-}
-
-
-int main(int argc, char *argv[]) {
- const char* host = "127.0.0.1";
- const char* user = "root";
- const char* passwd = "taosdata";
- const char* sql = "select * from meters;";
- const char* topic = "test-multiple";
- int async = 1, restart = 0, keep = 1, test = 0, blockFetch = 0;
-
- for (int i = 1; i < argc; i++) {
- if (strncmp(argv[i], "-h=", 3) == 0) {
- host = argv[i] + 3;
- continue;
- }
- if (strncmp(argv[i], "-u=", 3) == 0) {
- user = argv[i] + 3;
- continue;
- }
- if (strncmp(argv[i], "-p=", 3) == 0) {
- passwd = argv[i] + 3;
- continue;
- }
- if (strcmp(argv[i], "-sync") == 0) {
- async = 0;
- continue;
- }
- if (strcmp(argv[i], "-restart") == 0) {
- restart = 1;
- continue;
- }
- if (strcmp(argv[i], "-single") == 0) {
- sql = "select * from t0;";
- topic = "test-single";
- continue;
- }
- if (strcmp(argv[i], "-nokeep") == 0) {
- keep = 0;
- continue;
- }
- if (strncmp(argv[i], "-sql=", 5) == 0) {
- sql = argv[i] + 5;
- topic = "test-custom";
- continue;
- }
- if (strcmp(argv[i], "-test") == 0) {
- test = 1;
- continue;
- }
- if (strcmp(argv[i], "-block-fetch") == 0) {
- blockFetch = 1;
- continue;
- }
- }
-
- TAOS* taos = taos_connect(host, user, passwd, "", 0);
- if (taos == NULL) {
- printf("failed to connect to db, reason:%s\n", taos_errstr(taos));
- exit(1);
- }
-
- if (test) {
- run_test(taos);
- taos_close(taos);
- exit(0);
- }
-
- taos_select_db(taos, "test");
- TAOS_SUB* tsub = NULL;
- if (async) {
- // create an asynchronized subscription, the callback function will be called every 1s
- tsub = taos_subscribe(taos, restart, topic, sql, subscribe_callback, &blockFetch, 1000);
- } else {
- // create an synchronized subscription, need to call 'taos_consume' manually
- tsub = taos_subscribe(taos, restart, topic, sql, NULL, NULL, 0);
- }
-
- if (tsub == NULL) {
- printf("failed to create subscription.\n");
- exit(0);
- }
-
- if (async) {
- getchar();
- } else while(1) {
- TAOS_RES* res = taos_consume(tsub);
- if (res == NULL) {
- printf("failed to consume data.");
- break;
- } else {
- print_result(res, blockFetch);
- getchar();
- }
- }
-
- printf("total rows consumed: %d\n", nTotalRows);
- taos_unsubscribe(tsub, keep);
- taos_close(taos);
-
- return 0;
-}
diff --git a/examples/c/tmq.c b/examples/c/tmq.c
index e61ad69e6b..40d72d3af1 100644
--- a/examples/c/tmq.c
+++ b/examples/c/tmq.c
@@ -195,7 +195,7 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
if (tmqmessage) {
cnt++;
msg_process(tmqmessage);
- if (cnt >= 2) break;
+ /*if (cnt >= 2) break;*/
/*printf("get data\n");*/
taos_free_result(tmqmessage);
/*} else {*/
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 069ed74e10..0155271811 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -2570,6 +2570,12 @@ static FORCE_INLINE void* tDecodeSMqDataBlkRsp(const void* buf, SMqDataBlkRsp* p
buf = taosDecodeFixedI8(buf, &pRsp->withTbName);
buf = taosDecodeFixedI8(buf, &pRsp->withSchema);
buf = taosDecodeFixedI8(buf, &pRsp->withTag);
+ if (pRsp->withTbName) {
+ pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void*));
+ }
+ if (pRsp->withSchema) {
+ pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void*));
+ }
for (int32_t i = 0; i < pRsp->blockNum; i++) {
int32_t bLen = 0;
@@ -2579,20 +2585,14 @@ static FORCE_INLINE void* tDecodeSMqDataBlkRsp(const void* buf, SMqDataBlkRsp* p
taosArrayPush(pRsp->blockDataLen, &bLen);
taosArrayPush(pRsp->blockData, &data);
if (pRsp->withSchema) {
- pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void*));
SSchemaWrapper* pSW = (SSchemaWrapper*)taosMemoryMalloc(sizeof(SSchemaWrapper));
buf = taosDecodeSSchemaWrapper(buf, pSW);
taosArrayPush(pRsp->blockSchema, &pSW);
- } else {
- pRsp->blockSchema = NULL;
}
if (pRsp->withTbName) {
- pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void*));
char* name = NULL;
buf = taosDecodeString(buf, &name);
taosArrayPush(pRsp->blockTbName, &name);
- } else {
- pRsp->blockTbName = NULL;
}
}
}
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 0525cbf367..f7ad7b4ed8 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -80,6 +80,37 @@ typedef struct {
int8_t type;
} SStreamCheckpoint;
+typedef struct {
+ STaosQueue* queue;
+ STaosQall* qall;
+ void* qItem;
+ int8_t failed;
+} SStreamQ;
+
+static FORCE_INLINE void* streamQCurItem(SStreamQ* queue) {
+ //
+ return queue->qItem;
+}
+
+static FORCE_INLINE void* streamQNextItem(SStreamQ* queue) {
+ int8_t failed = atomic_load_8(&queue->failed);
+ if (failed) {
+ ASSERT(queue->qItem != NULL);
+ return streamQCurItem(queue);
+ } else {
+ taosGetQitem(queue->qall, &queue->qItem);
+ if (queue->qItem == NULL) {
+ taosReadAllQitems(queue->queue, queue->qall);
+ taosGetQitem(queue->qall, &queue->qItem);
+ }
+ return streamQCurItem(queue);
+ }
+}
+
+static FORCE_INLINE void streamQSetFail(SStreamQ* queue) { atomic_store_8(&queue->failed, 1); }
+
+static FORCE_INLINE void streamQSetSuccess(SStreamQ* queue) { atomic_store_8(&queue->failed, 0); }
+
static FORCE_INLINE SStreamDataSubmit* streamDataSubmitNew(SSubmitReq* pReq) {
SStreamDataSubmit* pDataSubmit = (SStreamDataSubmit*)taosAllocateQitem(sizeof(SStreamDataSubmit), DEF_QITEM);
if (pDataSubmit == NULL) return NULL;
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index c3d2788897..4550bccbed 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -85,6 +85,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RPC_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x0102)
#define TSDB_CODE_RPC_FQDN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0103)
#define TSDB_CODE_RPC_PORT_EADDRINUSE TAOS_DEF_ERROR_CODE(0, 0x0104)
+#define TSDB_CODE_RPC_INDIRECT_NETWORK_UNAVAIL TAOS_DEF_ERROR_CODE(0, 0x0105)
//client
#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200)
diff --git a/include/util/ttimer.h b/include/util/ttimer.h
index 1022259631..4111a8ca28 100644
--- a/include/util/ttimer.h
+++ b/include/util/ttimer.h
@@ -31,16 +31,16 @@ extern int32_t taosTmrThreads;
void *taosTmrInit(int32_t maxTmr, int32_t resoultion, int32_t longest, const char *label);
+void taosTmrCleanUp(void *handle);
+
tmr_h taosTmrStart(TAOS_TMR_CALLBACK fp, int32_t mseconds, void *param, void *handle);
bool taosTmrStop(tmr_h tmrId);
-bool taosTmrStopA(tmr_h *timerId);
+bool taosTmrStopA(tmr_h *tmrId);
bool taosTmrReset(TAOS_TMR_CALLBACK fp, int32_t mseconds, void *param, void *handle, tmr_h *pTmrId);
-void taosTmrCleanUp(void *handle);
-
#ifdef __cplusplus
}
#endif
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index c5fa377fea..d5e07ce676 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -219,7 +219,8 @@ typedef struct SRequestObj {
void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
-int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4);
+int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
+ bool freeAfterUse);
void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols);
void doFreeReqResultInfo(SReqResultInfo* pResInfo);
@@ -241,7 +242,7 @@ static FORCE_INLINE SReqResultInfo* tmqGetNextResInfo(TAOS_RES* res, bool conver
taosMemoryFreeClear(msg->resInfo.length);
taosMemoryFreeClear(msg->resInfo.convertBuf);
}
- setQueryResultFromRsp(&msg->resInfo, pRetrieve, convertUcs4);
+ setQueryResultFromRsp(&msg->resInfo, pRetrieve, convertUcs4, false);
return &msg->resInfo;
}
return NULL;
@@ -319,7 +320,7 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, int32_t code
int32_t getQueryPlan(SRequestObj* pRequest, SQuery* pQuery, SArray** pNodeList);
int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** res);
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
-int32_t updateQnodeList(SAppInstInfo*pInfo, SArray* pNodeList);
+int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList);
#ifdef __cplusplus
}
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index ee6cea79a7..375e1c0da9 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -117,7 +117,7 @@ TAOS* taos_connect_internal(const char* ip, const char* user, const char* pass,
SAppInstInfo* p = NULL;
if (pInst == NULL) {
p = taosMemoryCalloc(1, sizeof(struct SAppInstInfo));
- p->mgmtEp = epSet;
+ p->mgmtEp = epSet;
taosThreadMutexInit(&p->qnodeMutex, NULL);
p->pTransporter = openTransporter(user, secretEncrypt, tsNumOfCores);
p->pAppHbMgr = appHbMgrInit(p, key);
@@ -203,7 +203,7 @@ int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
SRetrieveTableRsp* pRsp = NULL;
int32_t code = qExecCommand(pQuery->pRoot, &pRsp);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
- code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false);
+ code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, false);
}
return code;
}
@@ -230,23 +230,23 @@ int32_t execDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
}
int compareQueryNodeLoad(const void* elem1, const void* elem2) {
- SQueryNodeLoad *node1 = (SQueryNodeLoad *)elem1;
- SQueryNodeLoad *node2 = (SQueryNodeLoad *)elem2;
+ SQueryNodeLoad* node1 = (SQueryNodeLoad*)elem1;
+ SQueryNodeLoad* node2 = (SQueryNodeLoad*)elem2;
if (node1->load < node2->load) {
return -1;
}
-
+
return node1->load > node2->load;
}
-int32_t updateQnodeList(SAppInstInfo*pInfo, SArray* pNodeList) {
+int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList) {
taosThreadMutexLock(&pInfo->qnodeMutex);
if (pInfo->pQnodeList) {
taosArrayDestroy(pInfo->pQnodeList);
pInfo->pQnodeList = NULL;
}
-
+
if (pNodeList) {
pInfo->pQnodeList = taosArrayDup(pNodeList);
taosArraySort(pInfo->pQnodeList, compareQueryNodeLoad);
@@ -257,9 +257,9 @@ int32_t updateQnodeList(SAppInstInfo*pInfo, SArray* pNodeList) {
}
int32_t getQnodeList(SRequestObj* pRequest, SArray** pNodeList) {
- SAppInstInfo*pInfo = pRequest->pTscObj->pAppInfo;
- int32_t code = 0;
-
+ SAppInstInfo* pInfo = pRequest->pTscObj->pAppInfo;
+ int32_t code = 0;
+
taosThreadMutexLock(&pInfo->qnodeMutex);
if (pInfo->pQnodeList) {
*pNodeList = taosArrayDup(pInfo->pQnodeList);
@@ -267,14 +267,14 @@ int32_t getQnodeList(SRequestObj* pRequest, SArray** pNodeList) {
taosThreadMutexUnlock(&pInfo->qnodeMutex);
if (NULL == *pNodeList) {
- SEpSet mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
- SCatalog* pCatalog = NULL;
+ SEpSet mgmtEpSet = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
+ SCatalog* pCatalog = NULL;
code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
if (TSDB_CODE_SUCCESS == code) {
*pNodeList = taosArrayInit(5, sizeof(SQueryNodeLoad));
code = catalogGetQnodeList(pCatalog, pRequest->pTscObj->pAppInfo->pTransporter, &mgmtEpSet, *pNodeList);
}
-
+
if (TSDB_CODE_SUCCESS == code && *pNodeList) {
code = updateQnodeList(pInfo, *pNodeList);
}
@@ -342,13 +342,13 @@ void setResPrecision(SReqResultInfo* pResInfo, int32_t precision) {
int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** pRes) {
void* pTransporter = pRequest->pTscObj->pAppInfo->pTransporter;
-
+
tsem_init(&schdRspSem, 0, 0);
SQueryResult res = {.code = 0, .numOfRows = 0};
int32_t code = schedulerAsyncExecJob(pTransporter, pNodeList, pDag, &pRequest->body.queryJob, pRequest->sqlstr,
- pRequest->metric.start, schdExecCallback, &res);
- while (true) {
+ pRequest->metric.start, schdExecCallback, &res);
+ while (true) {
if (code != TSDB_CODE_SUCCESS) {
if (pRequest->body.queryJob != 0) {
schedulerFreeJob(pRequest->body.queryJob);
@@ -361,7 +361,7 @@ int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNod
return pRequest->code;
} else {
tsem_wait(&schdRspSem);
-
+
if (res.code) {
code = res.code;
} else {
@@ -385,7 +385,6 @@ int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNod
return pRequest->code;
}
-
int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList, void** pRes) {
void* pTransporter = pRequest->pTscObj->pAppInfo->pTransporter;
@@ -783,7 +782,7 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg,
if (NULL == pEpSet) {
return;
}
-
+
switch (pSendInfo->target.type) {
case TARGET_TYPE_MNODE:
if (NULL == pTscObj) {
@@ -791,7 +790,7 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg,
return;
}
- updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet);
+ updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, pEpSet);
break;
case TARGET_TYPE_VNODE: {
if (NULL == pTscObj) {
@@ -800,12 +799,13 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg,
}
SCatalog* pCatalog = NULL;
- int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
if (code != TSDB_CODE_SUCCESS) {
- tscError("fail to get catalog handle, clusterId:%" PRIx64 ", error %s", pTscObj->pAppInfo->clusterId, tstrerror(code));
+ tscError("fail to get catalog handle, clusterId:%" PRIx64 ", error %s", pTscObj->pAppInfo->clusterId,
+ tstrerror(code));
return;
}
-
+
catalogUpdateVgEpSet(pCatalog, pSendInfo->target.dbFName, pSendInfo->target.vgId, pEpSet);
break;
}
@@ -815,12 +815,11 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg,
}
}
-
void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
assert(pMsg->info.ahandle != NULL);
SRequestObj* pRequest = NULL;
- STscObj* pTscObj = NULL;
+ STscObj* pTscObj = NULL;
if (pSendInfo->requestObjRefId != 0) {
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
@@ -947,7 +946,8 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU
return NULL;
}
- pRequest->code = setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4);
+ pRequest->code =
+ setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4, true);
if (pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
return NULL;
@@ -969,9 +969,8 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU
return pResultInfo->row;
}
-
void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
- //return doAsyncFetchRows(pRequest, setupOneRowPtr, convertUcs4);
+ // return doAsyncFetchRows(pRequest, setupOneRowPtr, convertUcs4);
assert(pRequest != NULL);
SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
@@ -989,7 +988,8 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
return NULL;
}
- pRequest->code = setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4);
+ pRequest->code =
+ setQueryResultFromRsp(&pRequest->body.resInfo, (SRetrieveTableRsp*)pResInfo->pData, convertUcs4, true);
if (pRequest->code != TSDB_CODE_SUCCESS) {
pResultInfo->numOfRows = 0;
return NULL;
@@ -1046,7 +1046,7 @@ static char* parseTagDatatoJson(void* p) {
memset(tagJsonKey, 0, sizeof(tagJsonKey));
memcpy(tagJsonKey, pTagVal->pKey, strlen(pTagVal->pKey));
// json value
- char type = pTagVal->type;
+ char type = pTagVal->type;
if (type == TSDB_DATA_TYPE_NULL) {
cJSON* value = cJSON_CreateNull();
if (value == NULL) {
@@ -1059,7 +1059,8 @@ static char* parseTagDatatoJson(void* p) {
char* tagJsonValue = taosMemoryCalloc(pTagVal->nData, 1);
int32_t length = taosUcs4ToMbs((TdUcs4*)pTagVal->pData, pTagVal->nData, tagJsonValue);
if (length < 0) {
- tscError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, pTagVal->pData);
+ tscError("charset:%s to %s. val:%s convert json value failed.", DEFAULT_UNICODE_ENCODEC, tsCharset,
+ pTagVal->pData);
taosMemoryFree(tagJsonValue);
goto end;
}
@@ -1277,11 +1278,12 @@ void resetConnectDB(STscObj* pTscObj) {
taosThreadMutexUnlock(&pTscObj->mutex);
}
-int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4) {
+int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
+ bool freeAfterUse) {
assert(pResultInfo != NULL && pRsp != NULL);
- taosMemoryFreeClear(pResultInfo->pRspMsg);
-
+ if (freeAfterUse) taosMemoryFreeClear(pResultInfo->pRspMsg);
+
pResultInfo->pRspMsg = (const char*)pRsp;
pResultInfo->pData = (void*)pRsp->data;
pResultInfo->numOfRows = htonl(pRsp->numOfRows);
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 8207ffb22f..b163ac32bf 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -36,6 +36,7 @@ static const SSysDbTableSchema mnodesSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
};
diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c
index 77fc156492..c66ff290cf 100644
--- a/source/common/src/tdataformat.c
+++ b/source/common/src/tdataformat.c
@@ -905,7 +905,7 @@ static void debugPrintTagVal(int8_t type, const void *val, int32_t vlen, const c
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_NCHAR: {
char tmpVal[32] = {0};
- memcpy(tmpVal, val, 32);
+ strncpy(tmpVal, val, vlen > 31 ? 31 : vlen);
printf("%s:%d type:%d vlen:%d, val:\"%s\"\n", tag, ln, (int32_t)type, vlen, tmpVal);
} break;
case TSDB_DATA_TYPE_FLOAT:
diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c
index 4578d81efb..7c94a33ffe 100644
--- a/source/dnode/mnode/impl/src/mndMnode.c
+++ b/source/dnode/mnode/impl/src/mndMnode.c
@@ -392,11 +392,6 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) {
mDebug("mnode:%d, start to create", createReq.dnodeId);
- if (sdbGetSize(pMnode->pSdb, SDB_MNODE) >= 3) {
- terrno = TSDB_CODE_MND_TOO_MANY_MNODES;
- goto _OVER;
- }
-
pObj = mndAcquireMnode(pMnode, createReq.dnodeId);
if (pObj != NULL) {
terrno = TSDB_CODE_MND_MNODE_ALREADY_EXIST;
@@ -405,12 +400,22 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
+ if (sdbGetSize(pMnode->pSdb, SDB_MNODE) >= 3) {
+ terrno = TSDB_CODE_MND_TOO_MANY_MNODES;
+ goto _OVER;
+ }
+
pDnode = mndAcquireDnode(pMnode, createReq.dnodeId);
if (pDnode == NULL) {
terrno = TSDB_CODE_MND_DNODE_NOT_EXIST;
goto _OVER;
}
+ if (!mndIsDnodeOnline(pMnode, pDnode, taosGetTimestampMs())) {
+ terrno = TSDB_CODE_NODE_OFFLINE;
+ goto _OVER;
+ }
+
pUser = mndAcquireUser(pMnode, pReq->conn.user);
if (pUser == NULL) {
terrno = TSDB_CODE_MND_NO_USER_FROM_CONN;
@@ -632,11 +637,12 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
int32_t numOfRows = 0;
int32_t cols = 0;
SMnodeObj *pObj = NULL;
+ ESdbStatus objStatus;
char *pWrite;
int64_t curMs = taosGetTimestampMs();
while (numOfRows < rows) {
- pShow->pIter = sdbFetch(pSdb, SDB_MNODE, pShow->pIter, (void **)&pObj);
+ pShow->pIter = sdbFetchAll(pSdb, SDB_MNODE, pShow->pIter, (void **)&pObj, &objStatus);
if (pShow->pIter == NULL) break;
cols = 0;
@@ -649,23 +655,26 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, b1, false);
- bool online = mndIsDnodeOnline(pMnode, pObj->pDnode, curMs);
- const char *roles = NULL;
+ const char *roles = "OFFLINE";
if (pObj->id == pMnode->selfDnodeId) {
roles = syncStr(TAOS_SYNC_STATE_LEADER);
- } else {
- if (!online) {
- roles = "OFFLINE";
- } else {
- roles = syncStr(pObj->state);
- }
}
- char *b2 = taosMemoryCalloc(1, 12 + VARSTR_HEADER_SIZE);
+ if (pObj->pDnode && mndIsDnodeOnline(pMnode, pObj->pDnode, curMs)) {
+ roles = syncStr(pObj->state);
+ }
+ char b2[12 + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(b2, roles, pShow->pMeta->pSchemas[cols].bytes);
-
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)b2, false);
+ const char *status = "READY";
+ if (objStatus == SDB_STATUS_CREATING) status = "CREATING";
+ if (objStatus == SDB_STATUS_DROPPING) status = "DROPPING";
+ char b3[9 + VARSTR_HEADER_SIZE] = {0};
+ STR_WITH_MAXSIZE_TO_VARSTR(b3, status, pShow->pMeta->pSchemas[cols].bytes);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)b3, false);
+
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pObj->createdTime, false);
diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c
index e191bb9b2a..bbee59090d 100644
--- a/source/dnode/mnode/impl/src/mndTrans.c
+++ b/source/dnode/mnode/impl/src/mndTrans.c
@@ -344,7 +344,7 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
action.pRaw = taosMemoryMalloc(dataLen);
if (action.pRaw == NULL) goto _OVER;
- mTrace("raw:%p, is created", pData);
+ mTrace("raw:%p, is created", action.pRaw);
SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER;
action.pRaw = NULL;
@@ -619,9 +619,7 @@ void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *
pTrans->paramLen = paramLen;
}
-void mndTransSetDbName(STrans *pTrans, const char *dbname) {
- memcpy(pTrans->dbname, dbname, TSDB_DB_FNAME_LEN);
-}
+void mndTransSetDbName(STrans *pTrans, const char *dbname) { memcpy(pTrans->dbname, dbname, TSDB_DB_FNAME_LEN); }
void mndTransSetSerial(STrans *pTrans) { pTrans->exec = TRN_EXEC_SERIAL; }
@@ -753,22 +751,30 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
sendRsp = true;
}
} else {
- if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 6) {
+ if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 3) {
if (code == 0) code = TSDB_CODE_MND_TRANS_UNKNOW_ERROR;
sendRsp = true;
}
}
if (sendRsp && pTrans->rpcInfo.handle != NULL) {
- void *rpcCont = rpcMallocCont(pTrans->rpcRspLen);
- if (rpcCont != NULL) {
- memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen);
- }
- taosMemoryFree(pTrans->rpcRsp);
-
mDebug("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage),
pTrans->rpcInfo.ahandle);
- SRpcMsg rspMsg = {.code = code, .pCont = rpcCont, .contLen = pTrans->rpcRspLen, .info = pTrans->rpcInfo};
+ if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
+ code = TSDB_CODE_RPC_INDIRECT_NETWORK_UNAVAIL;
+ }
+ SRpcMsg rspMsg = {.code = code, .info = pTrans->rpcInfo};
+
+ if (pTrans->rpcRspLen != 0) {
+ void *rpcCont = rpcMallocCont(pTrans->rpcRspLen);
+ if (rpcCont != NULL) {
+ memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen);
+ rspMsg.pCont = rpcCont;
+ rspMsg.contLen = pTrans->rpcRspLen;
+ }
+ taosMemoryFree(pTrans->rpcRsp);
+ }
+
tmsgSendRsp(&rspMsg);
pTrans->rpcInfo.handle = NULL;
pTrans->rpcRsp = NULL;
@@ -1000,6 +1006,9 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
if (pAction->msgReceived) {
if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
code = pAction->errCode;
+ pAction->msgSent = 0;
+ pAction->msgReceived = 0;
+ mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), action);
}
} else {
code = TSDB_CODE_ACTION_IN_PROGRESS;
@@ -1025,18 +1034,23 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
}
if (code == 0) {
+ pTrans->code = 0;
pTrans->redoActionPos++;
mDebug("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage),
pAction->id);
code = mndTransSync(pMnode, pTrans);
if (code != 0) {
- mError("trans:%d, failed to sync redoActionPos since %s", pTrans->id, terrstr());
+ pTrans->code = terrno;
+ mError("trans:%d, %s:%d is executed and failed to sync to other mnodes since %s", pTrans->id,
+ mndTransStr(pAction->stage), pAction->id, terrstr());
break;
}
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
mDebug("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
break;
} else {
+ terrno = code;
+ pTrans->code = code;
mError("trans:%d, %s:%d failed to execute since %s", pTrans->id, mndTransStr(pAction->stage), pAction->id,
terrstr());
break;
@@ -1239,19 +1253,8 @@ int32_t mndKillTrans(SMnode *pMnode, STrans *pTrans) {
return -1;
}
- int32_t size = taosArrayGetSize(pArray);
-
- for (int32_t i = 0; i < size; ++i) {
+ for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
STransAction *pAction = taosArrayGet(pArray, i);
- if (pAction == NULL) continue;
-
- if (pAction->msgReceived == 0) {
- mInfo("trans:%d, %s:%d set processed for kill msg received", pTrans->id, mndTransStr(pAction->stage), i);
- pAction->msgSent = 1;
- pAction->msgReceived = 1;
- pAction->errCode = 0;
- }
-
if (pAction->errCode != 0) {
mInfo("trans:%d, %s:%d set processed for kill msg received, errCode from %s to success", pTrans->id,
mndTransStr(pAction->stage), i, tstrerror(pAction->errCode));
@@ -1290,9 +1293,7 @@ static int32_t mndProcessKillTransReq(SRpcMsg *pReq) {
pTrans = mndAcquireTrans(pMnode, killReq.transId);
if (pTrans == NULL) {
- terrno = TSDB_CODE_MND_TRANS_NOT_EXIST;
- mError("trans:%d, failed to kill since %s", killReq.transId, terrstr());
- return -1;
+ goto _OVER;
}
code = mndKillTrans(pMnode, pTrans);
@@ -1300,9 +1301,9 @@ static int32_t mndProcessKillTransReq(SRpcMsg *pReq) {
_OVER:
if (code != 0) {
mError("trans:%d, failed to kill since %s", killReq.transId, terrstr());
- return -1;
}
+ mndReleaseUser(pMnode, pUser);
mndReleaseTrans(pMnode, pTrans);
return code;
}
diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c
index 219e0fa3dc..9262aa167b 100644
--- a/source/dnode/mnode/impl/src/mndVgroup.c
+++ b/source/dnode/mnode/impl/src/mndVgroup.c
@@ -504,7 +504,7 @@ _OVER:
taosArrayDestroy(pArray);
return code;
}
-//--->
+
int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) {
taosArraySort(pArray, (__compar_fn_t)mndCompareDnodeVnodes);
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
@@ -543,7 +543,7 @@ int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) {
terrno = TSDB_CODE_MND_NO_ENOUGH_DNODES;
return -1;
}
-//--->
+
int32_t mndRemoveVnodeFromVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray, SVnodeGid *del1, SVnodeGid *del2) {
taosArraySort(pArray, (__compar_fn_t)mndCompareDnodeVnodes);
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h
index 1fd0260d0d..4a00befa1e 100644
--- a/source/dnode/mnode/sdb/inc/sdb.h
+++ b/source/dnode/mnode/sdb/inc/sdb.h
@@ -301,6 +301,7 @@ void sdbRelease(SSdb *pSdb, void *pObj);
* @return void* The next iterator of the table.
*/
void *sdbFetch(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj);
+void *sdbFetchAll(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj, ESdbStatus *status) ;
/**
* @brief Cancel a traversal
diff --git a/source/dnode/mnode/sdb/src/sdbHash.c b/source/dnode/mnode/sdb/src/sdbHash.c
index abf35b71a9..162da2bd0a 100644
--- a/source/dnode/mnode/sdb/src/sdbHash.c
+++ b/source/dnode/mnode/sdb/src/sdbHash.c
@@ -368,6 +368,34 @@ void *sdbFetch(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj) {
return ppRow;
}
+void *sdbFetchAll(SSdb *pSdb, ESdbType type, void *pIter, void **ppObj, ESdbStatus *status) {
+ *ppObj = NULL;
+
+ SHashObj *hash = sdbGetHash(pSdb, type);
+ if (hash == NULL) return NULL;
+
+ TdThreadRwlock *pLock = &pSdb->locks[type];
+ taosThreadRwlockRdlock(pLock);
+
+ SSdbRow **ppRow = taosHashIterate(hash, pIter);
+ while (ppRow != NULL) {
+ SSdbRow *pRow = *ppRow;
+ if (pRow == NULL) {
+ ppRow = taosHashIterate(hash, ppRow);
+ continue;
+ }
+
+ atomic_add_fetch_32(&pRow->refCount, 1);
+ sdbPrintOper(pSdb, pRow, "fetch");
+ *ppObj = pRow->pObj;
+ *status = pRow->status;
+ break;
+ }
+ taosThreadRwlockUnlock(pLock);
+
+ return ppRow;
+}
+
void sdbCancelFetch(SSdb *pSdb, void *pIter) {
if (pIter == NULL) return;
SSdbRow *pRow = *(SSdbRow **)pIter;
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index 56d86c26a0..7cd82b0ac3 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -65,12 +65,6 @@ struct STqReadHandle {
// tqPush
-typedef struct {
- STaosQueue* queue;
- STaosQall* qall;
- void* qItem;
-} STqInputQ;
-
typedef struct {
// msg info
int64_t consumerId;
@@ -81,11 +75,13 @@ typedef struct {
// rpc info
int64_t reqId;
SRpcHandleInfo rpcInfo;
+ tmr_h timerId;
+ int8_t tmrStopped;
// exec
- int8_t inputStatus;
- int8_t execStatus;
- STqInputQ inputQ;
- SRWLatch lock;
+ int8_t inputStatus;
+ int8_t execStatus;
+ SStreamQ inputQ;
+ SRWLatch lock;
} STqPushHandle;
// tqExec
@@ -153,6 +149,7 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead*
// tqExec
int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId);
+int32_t tqSendPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataBlkRsp* pRsp);
// tqMeta
int32_t tqMetaOpen(STQ* pTq);
@@ -164,13 +161,12 @@ int32_t tqMetaDeleteHandle(STQ* pTq, const char* key);
void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
// tqOffset
-STqOffsetStore* STqOffsetOpen(STqOffsetCfg*);
-void STqOffsetClose(STqOffsetStore*);
-
-int64_t tqOffsetFetch(STqOffsetStore* pStore, const char* subscribeKey);
-int32_t tqOffsetCommit(STqOffsetStore* pStore, const char* subscribeKey, int64_t offset);
-int32_t tqOffsetPersist(STqOffsetStore* pStore, const char* subscribeKey);
-int32_t tqOffsetPersistAll(STqOffsetStore* pStore);
+STqOffsetStore* tqOffsetOpen(STqOffsetCfg*);
+void tqOffsetClose(STqOffsetStore*);
+int64_t tqOffsetFetch(STqOffsetStore* pStore, const char* subscribeKey);
+int32_t tqOffsetCommit(STqOffsetStore* pStore, const char* subscribeKey, int64_t offset);
+int32_t tqOffsetPersist(STqOffsetStore* pStore, const char* subscribeKey);
+int32_t tqOffsetPersistAll(STqOffsetStore* pStore);
#ifdef __cplusplus
}
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 172caf8724..e79de255f3 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -81,12 +81,41 @@ void tqClose(STQ* pTq) {
// TODO
}
+int32_t tqSendPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataBlkRsp* pRsp) {
+ int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, pRsp);
+ void* buf = rpcMallocCont(tlen);
+ if (buf == NULL) {
+ return -1;
+ }
+
+ ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
+ ((SMqRspHead*)buf)->epoch = pReq->epoch;
+ ((SMqRspHead*)buf)->consumerId = pReq->consumerId;
+
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
+ tEncodeSMqDataBlkRsp(&abuf, pRsp);
+
+ SRpcMsg resp = {
+ .info = pMsg->info,
+ .pCont = buf,
+ .contLen = tlen,
+ .code = 0,
+ };
+ tmsgSendRsp(&resp);
+
+ tqDebug("vg %d from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld",
+ TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->blockNum, pRsp->reqOffset, pRsp->rspOffset);
+
+ return 0;
+}
+
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
SMqPollReq* pReq = pMsg->pCont;
int64_t consumerId = pReq->consumerId;
int64_t timeout = pReq->timeout;
int32_t reqEpoch = pReq->epoch;
int64_t fetchOffset;
+ int32_t code = 0;
// get offset to fetch message
if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__EARLIEAST) {
@@ -155,7 +184,9 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
if (pHead->msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
- tqDataExec(pTq, &pHandle->execHandle, pCont, &rsp, workerId);
+ if (tqDataExec(pTq, &pHandle->execHandle, pCont, &rsp, workerId) < 0) {
+ /*ASSERT(0);*/
+ }
} else {
// TODO
ASSERT(0);
@@ -174,34 +205,16 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum);
ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum);
+ if (rsp.withSchema) {
+ ASSERT(taosArrayGetSize(rsp.blockSchema) == rsp.blockNum);
+ }
rsp.rspOffset = fetchOffset;
- int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqDataBlkRsp(NULL, &rsp);
- void* buf = rpcMallocCont(tlen);
- if (buf == NULL) {
- pMsg->code = -1;
- return -1;
+ if (tqSendPollRsp(pTq, pMsg, pReq, &rsp) < 0) {
+ code = -1;
}
- ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
- ((SMqRspHead*)buf)->epoch = pReq->epoch;
- ((SMqRspHead*)buf)->consumerId = consumerId;
-
- void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
- tEncodeSMqDataBlkRsp(&abuf, &rsp);
-
- SRpcMsg resp = {
- .info = pMsg->info,
- .pCont = buf,
- .contLen = tlen,
- .code = 0,
- };
- tmsgSendRsp(&resp);
-
- tqDebug("vg %d offset %ld from consumer %ld (epoch %d) send rsp, block num: %d, reqOffset: %ld, rspOffset: %ld",
- TD_VID(pTq->pVnode), fetchOffset, consumerId, pReq->epoch, rsp.blockNum, rsp.reqOffset, rsp.rspOffset);
-
// TODO wrap in destroy func
taosArrayDestroy(rsp.blockData);
taosArrayDestroy(rsp.blockDataLen);
@@ -214,7 +227,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
taosArrayDestroyP(rsp.blockTbName, (FDelete)taosMemoryFree);
}
- return 0;
+ return code;
}
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) {
diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c
index 90f512611b..4d83a67579 100644
--- a/source/dnode/vnode/src/tq/tqOffset.c
+++ b/source/dnode/vnode/src/tq/tqOffset.c
@@ -30,7 +30,7 @@ struct STqOffsetStore {
SHashObj* pHash; // SHashObj
};
-STqOffsetStore* STqOffsetOpen(STqOffsetCfg* pCfg) {
+STqOffsetStore* tqOffsetOpen(STqOffsetCfg* pCfg) {
STqOffsetStore* pStore = taosMemoryMalloc(sizeof(STqOffsetStore));
if (pStore == NULL) {
return NULL;
diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c
index f23a14472c..26e9dfe2e2 100644
--- a/source/dnode/vnode/src/tq/tqPush.c
+++ b/source/dnode/vnode/src/tq/tqPush.c
@@ -15,22 +15,80 @@
#include "tq.h"
+void tqTmrRspFunc(void* param, void* tmrId) {
+ STqHandle* pHandle = (STqHandle*)param;
+ atomic_store_8(&pHandle->pushHandle.tmrStopped, 1);
+}
+
int32_t tqExecFromInputQ(STQ* pTq, STqHandle* pHandle) {
+ SMqDataBlkRsp rsp = {0};
// 1. guard and set status executing
- // 2. check processedVer
- // 2.1. if not missed, get msg from queue
- // 2.2. if missed, scan wal
- //
- // 3. exec, after each success, update processed ver
- // first run
- // set exec status closing
- // second run
- // set exec status idle
- //
+ int8_t execStatus =
+ atomic_val_compare_exchange_8(&pHandle->pushHandle.execStatus, TASK_STATUS__IDLE, TASK_STATUS__EXECUTING);
+ if (execStatus == TASK_STATUS__IDLE) {
+ SStreamDataSubmit* pSubmit = NULL;
+ // 2. check processedVer
+ // 2.1. if not missed, get msg from queue
+ // 2.2. if missed, scan wal
+ pSubmit = streamQNextItem(&pHandle->pushHandle.inputQ);
+ while (pHandle->pushHandle.processedVer <= pSubmit->ver) {
+ // read from wal
+ }
+ while (pHandle->pushHandle.processedVer > pSubmit->ver + 1) {
+ streamQSetSuccess(&pHandle->pushHandle.inputQ);
+ streamDataSubmitRefDec(pSubmit);
+ pSubmit = streamQNextItem(&pHandle->pushHandle.inputQ);
+ if (pSubmit == NULL) break;
+ }
+ // 3. exec, after each success, update processed ver
+ // first run
+ while (pSubmit != NULL) {
+ ASSERT(pSubmit->ver == pHandle->pushHandle.processedVer + 1);
+ if (tqDataExec(pTq, &pHandle->execHandle, pSubmit->data, &rsp, 0) < 0) {
+ /*ASSERT(0);*/
+ }
+ // update processed
+ atomic_store_64(&pHandle->pushHandle.processedVer, pSubmit->ver);
+ streamQSetSuccess(&pHandle->pushHandle.inputQ);
+ streamDataSubmitRefDec(pSubmit);
+ if (rsp.blockNum > 0) {
+ goto SEND_RSP;
+ } else {
+ pSubmit = streamQNextItem(&pHandle->pushHandle.inputQ);
+ }
+ }
+ // set exec status closing
+ atomic_store_8(&pHandle->pushHandle.execStatus, TASK_STATUS__CLOSING);
+ // second run
+ while (pSubmit != NULL) {
+ ASSERT(pSubmit->ver == pHandle->pushHandle.processedVer + 1);
+ if (tqDataExec(pTq, &pHandle->execHandle, pSubmit->data, &rsp, 0) < 0) {
+ /*ASSERT(0);*/
+ }
+ // update processed
+ atomic_store_64(&pHandle->pushHandle.processedVer, pSubmit->ver);
+ streamQSetSuccess(&pHandle->pushHandle.inputQ);
+ streamDataSubmitRefDec(pSubmit);
+ if (rsp.blockNum > 0) {
+ goto SEND_RSP;
+ } else {
+ pSubmit = streamQNextItem(&pHandle->pushHandle.inputQ);
+ }
+ }
+ // set exec status idle
+ atomic_store_8(&pHandle->pushHandle.execStatus, TASK_STATUS__IDLE);
+ }
+SEND_RSP:
// 4. if get result
// 4.1 set exec input status blocked and exec status idle
+ atomic_store_8(&pHandle->pushHandle.execStatus, TASK_STATUS__IDLE);
// 4.2 rpc send
+ rsp.rspOffset = pHandle->pushHandle.processedVer;
+ /*if (tqSendPollRsp(pTq, pMsg, pReq, &rsp) < 0) {*/
+ /*return -1;*/
+ /*}*/
// 4.3 clear rpc info
+ memset(&pHandle->pushHandle.rpcInfo, 0, sizeof(SRpcHandleInfo));
return 0;
}
@@ -50,12 +108,15 @@ int32_t tqOpenPushHandle(STQ* pTq, STqHandle* pHandle) {
return 0;
}
-void tqPreparePush(STQ* pTq, STqHandle* pHandle, int64_t reqId, const SRpcHandleInfo* pInfo, int64_t processedVer) {
+int32_t tqPreparePush(STQ* pTq, STqHandle* pHandle, int64_t reqId, const SRpcHandleInfo* pInfo, int64_t processedVer,
+ int64_t timeout) {
memcpy(&pHandle->pushHandle.rpcInfo, pInfo, sizeof(SRpcHandleInfo));
atomic_store_64(&pHandle->pushHandle.reqId, reqId);
atomic_store_64(&pHandle->pushHandle.processedVer, processedVer);
atomic_store_8(&pHandle->pushHandle.inputStatus, TASK_INPUT_STATUS__NORMAL);
- // set timeout timer
+ atomic_store_8(&pHandle->pushHandle.tmrStopped, 0);
+ taosTmrReset(tqTmrRspFunc, (int32_t)timeout, pHandle, tqMgmt.timer, &pHandle->pushHandle.timerId);
+ return 0;
}
int32_t tqEnqueue(STqHandle* pHandle, SStreamDataSubmit* pSubmit) {
diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c
index efae74b55a..d0aede145e 100644
--- a/source/dnode/vnode/src/vnd/vnodeModule.c
+++ b/source/dnode/vnode/src/vnd/vnodeModule.c
@@ -69,6 +69,9 @@ int vnodeInit(int nthreads) {
if (walInit() < 0) {
return -1;
}
+ if (tqInit() < 0) {
+ return -1;
+ }
return 0;
}
@@ -94,6 +97,9 @@ void vnodeCleanup() {
taosMemoryFreeClear(vnodeGlobal.threads);
taosThreadCondDestroy(&(vnodeGlobal.hasTask));
taosThreadMutexDestroy(&(vnodeGlobal.mutex));
+
+ walCleanUp();
+ tqCleanUp();
}
int vnodeScheduleTask(int (*execute)(void*), void* arg) {
@@ -155,4 +161,4 @@ static void* loop(void* arg) {
}
return NULL;
-}
\ No newline at end of file
+}
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index add94cb83c..759f2439bd 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -103,6 +103,28 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le
return TSDB_CODE_SUCCESS;
}
+static int32_t translateLogarithm(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (1 != numOfParams && 2 != numOfParams) {
+ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
+ if (!IS_NUMERIC_TYPE(para1Type)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ if (2 == numOfParams) {
+ uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
+ if (!IS_NUMERIC_TYPE(para2Type)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+ }
+
+ pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE};
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateCount(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
@@ -817,11 +839,20 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
int32_t resultBytes = 0;
int32_t sepBytes = 0;
+ //concat_ws separator should be constant string
+ if (hasSep) {
+ SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
+ if (nodeType(pPara) != QUERY_NODE_VALUE) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "The first parameter of CONCAT_WS function can only be constant string");
+ }
+ }
+
/* For concat/concat_ws function, if params have NCHAR type, promote the final result to NCHAR */
for (int32_t i = 0; i < numOfParams; ++i) {
SNode* pPara = nodesListGetNode(pFunc->pParameterList, i);
uint8_t paraType = ((SExprNode*)pPara)->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType)) {
+ if (!IS_VAR_DATA_TYPE(paraType) && TSDB_DATA_TYPE_NULL != paraType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
if (TSDB_DATA_TYPE_NCHAR == paraType) {
@@ -921,16 +952,108 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
return TSDB_CODE_SUCCESS;
}
+/* Following are valid ISO-8601 timezone format:
+ * 1 z/Z
+ * 2 ±hh:mm
+ * 3 ±hhmm
+ * 4 ±hh
+ *
+ */
+
+static bool validateTimezoneFormat(const SValueNode* pVal) {
+ if (TSDB_DATA_TYPE_BINARY != pVal->node.resType.type) {
+ return false;
+ }
+
+ char *tz = varDataVal(pVal->datum.p);
+ int32_t len = varDataLen(pVal->datum.p);
+
+ if (len == 0) {
+ return false;
+ } else if (len == 1 && (tz[0] == 'z' || tz[0] == 'Z')) {
+ return true;
+ } else if ((tz[0] == '+' || tz[0] == '-')) {
+ switch (len) {
+ case 3:
+ case 5: {
+ for (int32_t i = 1; i < len; ++i) {
+ if (!isdigit(tz[i])) {
+ return false;
+ }
+ }
+ break;
+ }
+ case 6: {
+ for (int32_t i = 1; i < len; ++i) {
+ if (i == 3) {
+ if (tz[i] != ':') {
+ return false;
+ }
+ continue;
+ }
+ if (!isdigit(tz[i])) {
+ return false;
+ }
+ }
+ break;
+ }
+ default: {
+ return false;
+ }
+ }
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+void static addTimezoneParam(SNodeList* pList) {
+ char buf[6] = {0};
+ time_t t = taosTime(NULL);
+ struct tm *tmInfo = taosLocalTime(&t, NULL);
+ strftime(buf, sizeof(buf), "%z", tmInfo);
+ int32_t len = (int32_t)strlen(buf);
+
+ SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
+ pVal->literal = strndup(buf, len);
+ pVal->isDuration =false;
+ pVal->translate = true;
+ pVal->node.resType.type = TSDB_DATA_TYPE_BINARY;
+ pVal->node.resType.bytes = len + VARSTR_HEADER_SIZE;
+ pVal->node.resType.precision = TSDB_TIME_PRECISION_MILLI;
+ pVal->datum.p = taosMemoryCalloc(1, len + VARSTR_HEADER_SIZE +1);
+ varDataSetLen(pVal->datum.p, len);
+ strncpy(varDataVal(pVal->datum.p), pVal->literal, len);
+
+ nodesListAppend(pList, pVal);
+}
+
static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
- if (1 != LIST_LENGTH(pFunc->pParameterList)) {
+ int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+ if (1 != numOfParams && 2 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
+ //param0
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+ //param1
+ if (numOfParams == 2) {
+ SValueNode* pValue = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
+
+ if (!validateTimezoneFormat(pValue)) {
+ return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+ "Invalid timzone format");
+ }
+ } else { //add default client timezone
+ addTimezoneParam(pFunc->pParameterList);
+ }
+
+ //set result type
pFunc->node.resType = (SDataType){.bytes = 64, .type = TSDB_DATA_TYPE_BINARY};
return TSDB_CODE_SUCCESS;
}
@@ -1302,7 +1425,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "log",
.type = FUNCTION_TYPE_LOG,
.classification = FUNC_MGT_SCALAR_FUNC,
- .translateFunc = translateIn2NumOutDou,
+ .translateFunc = translateLogarithm,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = logFunction,
diff --git a/source/libs/index/inc/indexInt.h b/source/libs/index/inc/indexInt.h
index 81d43daf13..24a4e99970 100644
--- a/source/libs/index/inc/indexInt.h
+++ b/source/libs/index/inc/indexInt.h
@@ -131,8 +131,7 @@ typedef struct TFileCacheKey {
char* colName;
int32_t nColName;
} ICacheKey;
-
-int indexFlushCacheToTFile(SIndex* sIdx, void*);
+int indexFlushCacheToTFile(SIndex* sIdx, void*, bool quit);
int64_t indexAddRef(void* p);
int32_t indexRemoveRef(int64_t ref);
diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c
index 3d905303d1..ba3aea969f 100644
--- a/source/libs/index/src/index.c
+++ b/source/libs/index/src/index.c
@@ -150,6 +150,7 @@ void indexClose(SIndex* sIdx) {
indexCacheForceToMerge((void*)(*pCache));
indexInfo("%s wait to merge", (*pCache)->colName);
indexWait((void*)(sIdx));
+ indexInfo("%s finish to wait", (*pCache)->colName);
iter = taosHashIterate(sIdx->colObj, iter);
indexCacheUnRef(*pCache);
}
@@ -454,7 +455,7 @@ static void indexDestroyFinalResult(SArray* result) {
taosArrayDestroy(result);
}
-int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
+int indexFlushCacheToTFile(SIndex* sIdx, void* cache, bool quit) {
if (sIdx == NULL) {
return -1;
}
@@ -464,7 +465,7 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
IndexCache* pCache = (IndexCache*)cache;
- while (sIdx->quit && atomic_load_32(&pCache->merging) == 1) {
+ while (quit && atomic_load_32(&pCache->merging) == 1) {
}
TFileReader* pReader = tfileGetReaderByCol(sIdx->tindex, pCache->suid, pCache->colName);
if (pReader == NULL) {
@@ -476,11 +477,11 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
indexError("%p immtable is empty, ignore merge opera", pCache);
indexCacheDestroyImm(pCache);
tfileReaderUnRef(pReader);
- if (sIdx->quit) {
+ atomic_store_32(&pCache->merging, 0);
+ if (quit) {
indexPost(sIdx);
}
indexReleaseRef(sIdx->refId);
- atomic_store_32(&pCache->merging, 0);
return 0;
}
@@ -539,10 +540,10 @@ int indexFlushCacheToTFile(SIndex* sIdx, void* cache) {
} else {
indexInfo("success to merge , time cost: %" PRId64 "ms", cost / 1000);
}
- if (sIdx->quit) {
+ atomic_store_32(&pCache->merging, 0);
+ if (quit) {
indexPost(sIdx);
}
- atomic_store_32(&pCache->merging, 0);
indexReleaseRef(sIdx->refId);
return ret;
diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c
index 586a3ae573..4e7be245ef 100644
--- a/source/libs/index/src/indexCache.c
+++ b/source/libs/index/src/indexCache.c
@@ -728,9 +728,9 @@ static void doMergeWork(SSchedMsg* msg) {
IndexCache* pCache = msg->ahandle;
SIndex* sidx = (SIndex*)pCache->index;
- sidx->quit = msg->thandle ? true : false;
+ int quit = msg->thandle ? true : false;
taosMemoryFree(msg->thandle);
- indexFlushCacheToTFile(sidx, pCache);
+ indexFlushCacheToTFile(sidx, pCache, quit);
}
static bool indexCacheIteratorNext(Iterate* itera) {
SSkipListIterator* iter = itera->iter;
diff --git a/source/libs/index/test/jsonUT.cc b/source/libs/index/test/jsonUT.cc
index cd5a5d9b0f..48ce8839c4 100644
--- a/source/libs/index/test/jsonUT.cc
+++ b/source/libs/index/test/jsonUT.cc
@@ -51,6 +51,7 @@ class JsonEnv : public ::testing::Test {
tIndexJsonClose(index);
indexOptsDestroy(opts);
printf("destory\n");
+ taosMsleep(1000);
}
SIndexJsonOpts* opts;
SIndexJson* index;
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index cb4a4f104c..e2c9f91af4 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -142,14 +142,16 @@ static SNode* valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) {
break;
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
- case TSDB_DATA_TYPE_VARBINARY:
- pDst->datum.p = taosMemoryMalloc(pSrc->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
+ case TSDB_DATA_TYPE_VARBINARY:{
+ int32_t len = varDataTLen(pSrc->datum.p) + 1;
+ pDst->datum.p = taosMemoryCalloc(1, len);
if (NULL == pDst->datum.p) {
nodesDestroyNode(pDst);
return NULL;
}
- memcpy(pDst->datum.p, pSrc->datum.p, pSrc->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
+ memcpy(pDst->datum.p, pSrc->datum.p, len);
break;
+ }
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index 8003c011a1..5c656b969b 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -941,7 +941,7 @@ static int32_t parseTagToken(char** end, SToken* pToken, SSchema* pSchema, int16
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- if (!taosMbsToUcs4(pToken->z, pToken->n, (TdUcs4*)(p), pSchema->bytes - VARSTR_HEADER_SIZE, &output)) {
+ if (!taosMbsToUcs4(pToken->z, pToken->n, (TdUcs4*)(p), pToken->n * TSDB_NCHAR_SIZE, &output)) {
if (errno == E2BIG) {
taosMemoryFree(p);
return generateSyntaxErrMsg(pMsgBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name);
@@ -1743,10 +1743,10 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, char* tN
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
- if (!taosMbsToUcs4(bind[c].buffer, colLen, (TdUcs4*)(p), pSchema->bytes - VARSTR_HEADER_SIZE, &output)) {
+ if (!taosMbsToUcs4(bind[c].buffer, colLen, (TdUcs4*)(p), colLen * TSDB_NCHAR_SIZE, &output)) {
if (errno == E2BIG) {
taosMemoryFree(p);
- code = generateSyntaxErrMsg(&pBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pSchema->name);
+ code = generateSyntaxErrMsg(&pBuf, TSDB_CODE_PAR_VALUE_TOO_LONG, pTagSchema->name);
goto end;
}
char buf[512] = {0};
@@ -2132,12 +2132,12 @@ static int32_t smlBuildTagRow(SArray* cols, SParsedDataColInfo* tags, SSchema* p
val.nData = kv->length;
} else if (pTagSchema->type == TSDB_DATA_TYPE_NCHAR) {
int32_t output = 0;
- void* p = taosMemoryCalloc(1, pTagSchema->bytes - VARSTR_HEADER_SIZE);
- if (p == NULL) {
+ void *p = taosMemoryCalloc(1, kv->length * TSDB_NCHAR_SIZE);
+ if(p == NULL){
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
- if (!taosMbsToUcs4(kv->value, kv->length, (TdUcs4*)(p), pTagSchema->bytes - VARSTR_HEADER_SIZE, &output)) {
+ if (!taosMbsToUcs4(kv->value, kv->length, (TdUcs4*)(p), kv->length * TSDB_NCHAR_SIZE, &output)) {
if (errno == E2BIG) {
taosMemoryFree(p);
code = generateSyntaxErrMsg(msg, TSDB_CODE_PAR_VALUE_TOO_LONG, pTagSchema->name);
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 55a473982b..6fc9680c09 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -712,7 +712,6 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD
pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1);
if (NULL == pVal->datum.p) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY);
- ;
}
int32_t len = 0;
diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c
index e5d606ffff..3d0204e355 100644
--- a/source/libs/qworker/src/qwUtil.c
+++ b/source/libs/qworker/src/qwUtil.c
@@ -1,10 +1,10 @@
-#include "qworker.h"
#include "dataSinkMgt.h"
#include "executor.h"
#include "planner.h"
#include "query.h"
#include "qwInt.h"
#include "qwMsg.h"
+#include "qworker.h"
#include "tcommon.h"
#include "tmsg.h"
#include "tname.h"
@@ -406,7 +406,6 @@ int32_t qwDropTask(QW_FPARAMS_DEF) {
return TSDB_CODE_SUCCESS;
}
-
void qwSetHbParam(int64_t refId, SQWHbParam **pParam) {
int32_t paramIdx = 0;
int32_t newParamIdx = 0;
@@ -430,11 +429,10 @@ void qwSetHbParam(int64_t refId, SQWHbParam **pParam) {
*pParam = &gQwMgmt.param[paramIdx];
}
-
-void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) {
+void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) {
char dbFName[TSDB_DB_FNAME_LEN];
char tbName[TSDB_TABLE_NAME_LEN];
-
+
qGetQueriedTableSchemaVersion(pTaskInfo, dbFName, tbName, &ctx->tbInfo.sversion, &ctx->tbInfo.tversion);
if (dbFName[0] && tbName[0]) {
@@ -444,7 +442,6 @@ void qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) {
}
}
-
void qwCloseRef(void) {
taosWLockLatch(&gQwMgmt.lock);
if (atomic_load_32(&gQwMgmt.qwNum) <= 0 && gQwMgmt.qwRef >= 0) {
@@ -454,13 +451,13 @@ void qwCloseRef(void) {
taosWUnLockLatch(&gQwMgmt.lock);
}
-
void qwDestroySchStatus(SQWSchStatus *pStatus) { taosHashCleanup(pStatus->tasksHash); }
void qwDestroyImpl(void *pMgmt) {
SQWorker *mgmt = (SQWorker *)pMgmt;
- taosTmrStopA(&mgmt->hbTimer);
+ taosTmrStop(mgmt->hbTimer);
+ mgmt->hbTimer = NULL;
taosTmrCleanUp(mgmt->timer);
// TODO STOP ALL QUERY
@@ -527,10 +524,10 @@ int64_t qwGetTimeInQueue(SQWorker *mgmt, EQueueType type) {
switch (type) {
case QUERY_QUEUE:
pStat = &mgmt->stat.msgStat.waitTime[0];
- return pStat->num ? (pStat->total/pStat->num) : 0;
+ return pStat->num ? (pStat->total / pStat->num) : 0;
case FETCH_QUEUE:
pStat = &mgmt->stat.msgStat.waitTime[1];
- return pStat->num ? (pStat->total/pStat->num) : 0;
+ return pStat->num ? (pStat->total / pStat->num) : 0;
default:
qError("unsupported queue type %d", type);
}
@@ -538,5 +535,3 @@ int64_t qwGetTimeInQueue(SQWorker *mgmt, EQueueType type) {
return -1;
}
-
-
diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c
index 6ee5f038d6..370ea8fa2e 100644
--- a/source/libs/scalar/src/sclfunc.c
+++ b/source/libs/scalar/src/sclfunc.c
@@ -15,7 +15,11 @@ typedef void (*_trim_fn)(char *, char*, int32_t, int32_t);
typedef int16_t (*_len_fn)(char *, int32_t);
/** Math functions **/
-static double tlog(double v, double base) {
+static double tlog(double v) {
+ return log(v);
+}
+
+static double tlog2(double v, double base) {
double a = log(v);
double b = log(base);
if (isnan(a) || isinf(a)) {
@@ -444,7 +448,8 @@ int32_t concatFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
for (int32_t k = 0; k < numOfRows; ++k) {
bool hasNull = false;
for (int32_t i = 0; i < inputNum; ++i) {
- if (colDataIsNull_s(pInputData[i], k)) {
+ if (colDataIsNull_s(pInputData[i], k) ||
+ GET_PARAM_TYPE(&pInput[i]) == TSDB_DATA_TYPE_NULL) {
colDataAppendNULL(pOutputData, k);
hasNull = true;
break;
@@ -520,7 +525,8 @@ int32_t concatWsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
char *output = outputBuf;
for (int32_t k = 0; k < numOfRows; ++k) {
- if (colDataIsNull_s(pInputData[0], k)) {
+ if (colDataIsNull_s(pInputData[0], k) ||
+ GET_PARAM_TYPE(&pInput[0]) == TSDB_DATA_TYPE_NULL) {
colDataAppendNULL(pOutputData, k);
continue;
}
@@ -528,7 +534,8 @@ int32_t concatWsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
int16_t dataLen = 0;
bool hasNull = false;
for (int32_t i = 1; i < inputNum; ++i) {
- if (colDataIsNull_s(pInputData[i], k)) {
+ if (colDataIsNull_s(pInputData[i], k) ||
+ GET_PARAM_TYPE(&pInput[i]) == TSDB_DATA_TYPE_NULL) {
hasNull = true;
break;
}
@@ -849,6 +856,11 @@ int32_t castFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutp
int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
int32_t type = GET_PARAM_TYPE(pInput);
+ char* tz;
+ int32_t tzLen;
+ tz = varDataVal(pInput[1].columnData->pData);
+ tzLen = varDataLen(pInput[1].columnData->pData);
+
for (int32_t i = 0; i < pInput[0].numOfRows; ++i) {
if (colDataIsNull_s(pInput[0].columnData, i)) {
colDataAppendNULL(pOutput->columnData, i);
@@ -880,9 +892,13 @@ int32_t toISO8601Function(SScalarParam *pInput, int32_t inputNum, SScalarParam *
}
struct tm *tmInfo = taosLocalTime((const time_t *)&timeVal, NULL);
- strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S%z", tmInfo);
+ strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", tmInfo);
int32_t len = (int32_t)strlen(buf);
+    // add timezone string
+ snprintf(buf + len, tzLen + 1, "%s", tz);
+ len += tzLen;
+
if (hasFraction) {
int32_t fracLen = (int32_t)strlen(fraction) + 1;
char *tzInfo = strchr(buf, '+');
@@ -1365,7 +1381,11 @@ int32_t powFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutpu
}
int32_t logFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
- return doScalarFunctionUnique2(pInput, inputNum, pOutput, tlog);
+ if (inputNum == 1) {
+ return doScalarFunctionUnique(pInput, inputNum, pOutput, tlog);
+ } else {
+ return doScalarFunctionUnique2(pInput, inputNum, pOutput, tlog2);
+ }
}
int32_t sqrtFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) {
diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h
index a8093f46a2..e680e30042 100644
--- a/source/libs/transport/inc/transComm.h
+++ b/source/libs/transport/inc/transComm.h
@@ -351,6 +351,23 @@ bool transEpSetIsEqual(SEpSet* a, SEpSet* b);
*/
void transThreadOnce();
+// ref mgt
+// handle
+typedef struct SExHandle {
+ void* handle;
+ int64_t refId;
+ void* pThrd;
+} SExHandle;
+
+void transInitEnv();
+int32_t transOpenExHandleMgt(int size);
+void transCloseExHandleMgt(int32_t mgt);
+int64_t transAddExHandle(int32_t mgt, void* p);
+int32_t transRemoveExHandle(int32_t mgt, int64_t refId);
+SExHandle* transAcquireExHandle(int32_t mgt, int64_t refId);
+int32_t transReleaseExHandle(int32_t mgt, int64_t refId);
+void transDestoryExHandle(void* handle);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h
index 8aeae1b5ad..c328629c4b 100644
--- a/source/libs/transport/inc/transportInt.h
+++ b/source/libs/transport/inc/transportInt.h
@@ -22,13 +22,13 @@
#include "lz4.h"
#include "os.h"
#include "taoserror.h"
+#include "tglobal.h"
#include "thash.h"
-#include "tref.h"
#include "tmsg.h"
#include "transLog.h"
+#include "tref.h"
#include "trpc.h"
#include "tutil.h"
-#include "tglobal.h"
#ifdef __cplusplus
extern "C" {
@@ -55,9 +55,9 @@ typedef struct {
bool (*retry)(int32_t code);
int index;
- int32_t refCount;
void* parent;
void* tcphandle; // returned handle from TCP initialization
+ int32_t refMgt;
TdThreadMutex mutex;
} SRpcInfo;
diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c
index 84b0156e36..925de2f321 100644
--- a/source/libs/transport/src/trans.c
+++ b/source/libs/transport/src/trans.c
@@ -36,6 +36,8 @@ static int32_t transValidLocalFqdn(const char* localFqdn, uint32_t* ip) {
return 0;
}
void* rpcOpen(const SRpcInit* pInit) {
+ transInitEnv();
+
SRpcInfo* pRpc = taosMemoryCalloc(1, sizeof(SRpcInfo));
if (pRpc == NULL) {
return NULL;
@@ -79,7 +81,9 @@ void* rpcOpen(const SRpcInit* pInit) {
void rpcClose(void* arg) {
SRpcInfo* pRpc = (SRpcInfo*)arg;
(*taosCloseHandle[pRpc->connType])(pRpc->tcphandle);
+ transCloseExHandleMgt(pRpc->refMgt);
taosMemoryFree(pRpc);
+
return;
}
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index a8e79266ac..580ab30b78 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -15,6 +15,9 @@
#ifdef USE_UV
#include "transComm.h"
+static int32_t transSCliInst = 0;
+static int32_t refMgt = 0;
+
typedef struct SCliConn {
T_REF_DECLARE()
uv_connect_t connReq;
@@ -846,6 +849,11 @@ void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads,
}
cli->pThreadObj[i] = pThrd;
}
+ int ref = atomic_add_fetch_32(&transSCliInst, 1);
+ if (ref == 1) {
+ refMgt = transOpenExHandleMgt(50000);
+ }
+
return cli;
}
@@ -954,7 +962,7 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
* upper layer handle retry if code equal TSDB_CODE_RPC_NETWORK_UNAVAIL
*/
tmsg_t msgType = pCtx->msgType;
- if ((pTransInst->retry != NULL && (pTransInst->retry(pResp->code))) ||
+ if ((pTransInst->retry != NULL && pEpSet->numOfEps > 1 && (pTransInst->retry(pResp->code))) ||
(pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pResp->code == TSDB_CODE_APP_NOT_READY ||
pResp->code == TSDB_CODE_NODE_NOT_DEPLOYED || pResp->code == TSDB_CODE_SYN_NOT_LEADER)) {
pMsg->sent = 0;
@@ -1019,6 +1027,10 @@ void transCloseClient(void* arg) {
}
taosMemoryFree(cli->pThreadObj);
taosMemoryFree(cli);
+ int ref = atomic_sub_fetch_32(&transSCliInst, 1);
+ if (ref == 0) {
+ transCloseExHandleMgt(refMgt);
+ }
}
void transRefCliHandle(void* handle) {
if (handle == NULL) {
diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c
index 333ec44fe4..a04e8b5fca 100644
--- a/source/libs/transport/src/transComm.c
+++ b/source/libs/transport/src/transComm.c
@@ -470,4 +470,41 @@ bool transEpSetIsEqual(SEpSet* a, SEpSet* b) {
}
return true;
}
+
+void transInitEnv() {
+ //
+ uv_os_setenv("UV_TCP_SINGLE_ACCEPT", "1");
+}
+int32_t transOpenExHandleMgt(int size) {
+ // added into once later
+ return taosOpenRef(size, transDestoryExHandle);
+}
+void transCloseExHandleMgt(int32_t mgt) {
+ // close ref
+ taosCloseRef(mgt);
+}
+int64_t transAddExHandle(int32_t mgt, void* p) {
+  // add extern handle to ref mgt and return its refId
+ return taosAddRef(mgt, p);
+}
+int32_t transRemoveExHandle(int32_t mgt, int64_t refId) {
+  // remove extern handle from ref mgt
+ return taosRemoveRef(mgt, refId);
+}
+
+SExHandle* transAcquireExHandle(int32_t mgt, int64_t refId) {
+ // acquire extern handle
+ return (SExHandle*)taosAcquireRef(mgt, refId);
+}
+
+int32_t transReleaseExHandle(int32_t mgt, int64_t refId) {
+ // release extern handle
+ return taosReleaseRef(mgt, refId);
+}
+void transDestoryExHandle(void* handle) {
+ if (handle == NULL) {
+ return;
+ }
+ taosMemoryFree(handle);
+}
#endif
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index 52b36433bb..608fd00b2c 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -19,8 +19,9 @@
static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT;
-static char* notify = "a";
-static int tranSSvrInst = 0;
+static char* notify = "a";
+static int32_t tranSSvrInst = 0;
+static int32_t refMgt = 0;
typedef struct {
int notifyCount; //
@@ -99,13 +100,6 @@ typedef struct SServerObj {
bool inited;
} SServerObj;
-// handle
-typedef struct SExHandle {
- void* handle;
- int64_t refId;
- SWorkThrdObj* pThrd;
-} SExHandle;
-
static void uvAllocConnBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
static void uvAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
static void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
@@ -150,14 +144,14 @@ static void (*transAsyncHandle[])(SSvrMsg* msg, SWorkThrdObj* thrd) = {uvHandleR
static int32_t exHandlesMgt;
-void uvInitEnv();
-void uvOpenExHandleMgt(int size);
-void uvCloseExHandleMgt();
-int64_t uvAddExHandle(void* p);
-int32_t uvRemoveExHandle(int64_t refId);
-int32_t uvReleaseExHandle(int64_t refId);
-void uvDestoryExHandle(void* handle);
-SExHandle* uvAcquireExHandle(int64_t refId);
+// void uvInitEnv();
+// void uvOpenExHandleMgt(int size);
+// void uvCloseExHandleMgt();
+// int64_t uvAddExHandle(void* p);
+// int32_t uvRemoveExHandle(int64_t refId);
+// int32_t uvReleaseExHandle(int64_t refId);
+// void uvDestoryExHandle(void* handle);
+// SExHandle* uvAcquireExHandle(int64_t refId);
static void uvDestroyConn(uv_handle_t* handle);
@@ -210,7 +204,7 @@ static bool addHandleToAcceptloop(void* arg);
do { \
if (refId > 0) { \
tTrace("server handle step1"); \
- SExHandle* exh2 = uvAcquireExHandle(refId); \
+ SExHandle* exh2 = transAcquireExHandle(refMgt, refId); \
if (exh2 == NULL || refId != exh2->refId) { \
tTrace("server handle %p except, may already freed, ignore msg, ref1: %" PRIu64 ", ref2 : %" PRIu64 "", exh1, \
exh2 ? exh2->refId : 0, refId); \
@@ -218,7 +212,7 @@ static bool addHandleToAcceptloop(void* arg);
} \
} else if (refId == 0) { \
tTrace("server handle step2"); \
- SExHandle* exh2 = uvAcquireExHandle(refId); \
+ SExHandle* exh2 = transAcquireExHandle(refMgt, refId); \
if (exh2 == NULL || refId != exh2->refId) { \
tTrace("server handle %p except, may already freed, ignore msg, ref1: %" PRIu64 ", ref2 : %" PRIu64 "", exh1, \
refId, exh2 ? exh2->refId : 0); \
@@ -300,14 +294,14 @@ static void uvHandleReq(SSvrConn* pConn) {
// 2. once send out data, cli conn released to conn pool immediately
// 3. not mixed with persist
- transMsg.info.handle = (void*)uvAcquireExHandle(pConn->refId);
+ transMsg.info.handle = (void*)transAcquireExHandle(refMgt, pConn->refId);
transMsg.info.refId = pConn->refId;
tTrace("server handle %p conn: %p translated to app, refId: %" PRIu64 "", transMsg.info.handle, pConn, pConn->refId);
assert(transMsg.info.handle != NULL);
if (pHead->noResp == 1) {
transMsg.info.refId = -1;
}
- uvReleaseExHandle(pConn->refId);
+ transReleaseExHandle(refMgt, pConn->refId);
STrans* pTransInst = pConn->pTransInst;
(*pTransInst->cfp)(pTransInst->parent, &transMsg, NULL);
@@ -535,15 +529,15 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
SExHandle* exh1 = transMsg.info.handle;
int64_t refId = transMsg.info.refId;
- SExHandle* exh2 = uvAcquireExHandle(refId);
+ SExHandle* exh2 = transAcquireExHandle(refMgt, refId);
if (exh2 == NULL || exh1 != exh2) {
tTrace("server handle except msg %p, ignore it", exh1);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
destroySmsg(msg);
continue;
}
msg->pConn = exh1->handle;
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
(*transAsyncHandle[msg->type])(msg, pThrd);
}
}
@@ -785,8 +779,8 @@ static SSvrConn* createConn(void* hThrd) {
SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle));
exh->handle = pConn;
exh->pThrd = pThrd;
- exh->refId = uvAddExHandle(exh);
- uvAcquireExHandle(exh->refId);
+ exh->refId = transAddExHandle(refMgt, exh);
+ transAcquireExHandle(refMgt, exh->refId);
pConn->refId = exh->refId;
transRefSrvHandle(pConn);
@@ -815,14 +809,14 @@ static void destroyConnRegArg(SSvrConn* conn) {
}
}
static int reallocConnRefHandle(SSvrConn* conn) {
- uvReleaseExHandle(conn->refId);
- uvRemoveExHandle(conn->refId);
+ transReleaseExHandle(refMgt, conn->refId);
+ transRemoveExHandle(refMgt, conn->refId);
// avoid app continue to send msg on invalid handle
SExHandle* exh = taosMemoryMalloc(sizeof(SExHandle));
exh->handle = conn;
exh->pThrd = conn->hostThrd;
- exh->refId = uvAddExHandle(exh);
- uvAcquireExHandle(exh->refId);
+ exh->refId = transAddExHandle(refMgt, exh);
+ transAcquireExHandle(refMgt, exh->refId);
conn->refId = exh->refId;
return 0;
@@ -834,8 +828,8 @@ static void uvDestroyConn(uv_handle_t* handle) {
}
SWorkThrdObj* thrd = conn->hostThrd;
- uvReleaseExHandle(conn->refId);
- uvRemoveExHandle(conn->refId);
+ transReleaseExHandle(refMgt, conn->refId);
+ transRemoveExHandle(refMgt, conn->refId);
tDebug("server conn %p destroy", conn);
// uv_timer_stop(&conn->pTimer);
@@ -883,8 +877,11 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
srv->port = port;
uv_loop_init(srv->loop);
- taosThreadOnce(&transModuleInit, uvInitEnv);
- tranSSvrInst++;
+ // taosThreadOnce(&transModuleInit, uvInitEnv);
+ int ref = atomic_add_fetch_32(&tranSSvrInst, 1);
+ if (ref == 1) {
+ refMgt = transOpenExHandleMgt(50000);
+ }
assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0));
#ifdef WINDOWS
@@ -944,43 +941,6 @@ End:
return NULL;
}
-void uvInitEnv() {
- uv_os_setenv("UV_TCP_SINGLE_ACCEPT", "1");
- uvOpenExHandleMgt(10000);
-}
-void uvOpenExHandleMgt(int size) {
- // added into once later
- exHandlesMgt = taosOpenRef(size, uvDestoryExHandle);
-}
-void uvCloseExHandleMgt() {
- // close ref
- taosCloseRef(exHandlesMgt);
-}
-int64_t uvAddExHandle(void* p) {
- // acquire extern handle
- return taosAddRef(exHandlesMgt, p);
-}
-int32_t uvRemoveExHandle(int64_t refId) {
- // acquire extern handle
- return taosRemoveRef(exHandlesMgt, refId);
-}
-
-SExHandle* uvAcquireExHandle(int64_t refId) {
- // acquire extern handle
- return (SExHandle*)taosAcquireRef(exHandlesMgt, refId);
-}
-
-int32_t uvReleaseExHandle(int64_t refId) {
- // release extern handle
- return taosReleaseRef(exHandlesMgt, refId);
-}
-void uvDestoryExHandle(void* handle) {
- if (handle == NULL) {
- return;
- }
- taosMemoryFree(handle);
-}
-
void uvHandleQuit(SSvrMsg* msg, SWorkThrdObj* thrd) {
thrd->quit = true;
if (QUEUE_IS_EMPTY(&thrd->conn)) {
@@ -1075,11 +1035,11 @@ void transCloseServer(void* arg) {
taosMemoryFree(srv);
- tranSSvrInst--;
- if (tranSSvrInst == 0) {
- TdThreadOnce tmpInit = PTHREAD_ONCE_INIT;
- memcpy(&transModuleInit, &tmpInit, sizeof(TdThreadOnce));
- uvCloseExHandleMgt();
+ int ref = atomic_sub_fetch_32(&tranSSvrInst, 1);
+ if (ref == 0) {
+ // TdThreadOnce tmpInit = PTHREAD_ONCE_INIT;
+ // memcpy(&transModuleInit, &tmpInit, sizeof(TdThreadOnce));
+ transCloseExHandleMgt(refMgt);
}
}
@@ -1119,11 +1079,11 @@ void transReleaseSrvHandle(void* handle) {
tTrace("server conn %p start to release", exh->handle);
transSendAsync(pThrd->asyncPool, &m->q);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
return;
_return1:
tTrace("server handle %p failed to send to release handle", exh);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
return;
_return2:
tTrace("server handle %p failed to send to release handle", exh);
@@ -1146,12 +1106,12 @@ void transSendResponse(const STransMsg* msg) {
m->type = Normal;
tDebug("server conn %p start to send resp (1/2)", exh->handle);
transSendAsync(pThrd->asyncPool, &m->q);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
return;
_return1:
tTrace("server handle %p failed to send resp", exh);
rpcFreeCont(msg->pCont);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
return;
_return2:
tTrace("server handle %p failed to send resp", exh);
@@ -1174,13 +1134,13 @@ void transRegisterMsg(const STransMsg* msg) {
m->type = Register;
tTrace("server conn %p start to register brokenlink callback", exh->handle);
transSendAsync(pThrd->asyncPool, &m->q);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
return;
_return1:
tTrace("server handle %p failed to send to register brokenlink", exh);
rpcFreeCont(msg->pCont);
- uvReleaseExHandle(refId);
+ transReleaseExHandle(refMgt, refId);
return;
_return2:
tTrace("server handle %p failed to send to register brokenlink", exh);
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index b81d81c736..74fc14ecdd 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -90,6 +90,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_AUTH_FAILURE, "Authentication failur
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_UNAVAIL, "Unable to establish connection")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, "Unable to resolve FQDN")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_PORT_EADDRINUSE, "Port already in use")
+TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INDIRECT_NETWORK_UNAVAIL, "Unable to establish connection")
//client
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_OPERATION, "Invalid operation")
@@ -244,7 +245,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_SINGLE_STB_MODE_DB, "Database is single st
// mnode-infoSchema
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SYS_TABLENAME, "Invalid system table name")
-
// mnode-func
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FUNC_ALREADY_EXIST, "Func already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FUNC_NOT_EXIST, "Func not exists")
@@ -325,9 +325,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, "Invalid table ID")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_TYPE, "Invalid table type")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION, "Invalid table schema version")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_ALREADY_EXIST, "Table already exists")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_NOT_EXIST, "Table not exists")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_ALREADY_EXIST, "Stable already exists")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_NOT_EXIST, "Stable not exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_NOT_EXIST, "Table not exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_ALREADY_EXIST, "Stable already exists")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_NOT_EXIST, "Stable not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CONFIG, "Invalid configuration")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INIT_FAILED, "Tsdb init failed")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_DISKSPACE, "No diskspace for tsdb")
diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c
index ee1f418561..691a0d34d4 100644
--- a/source/util/src/tsched.c
+++ b/source/util/src/tsched.c
@@ -23,19 +23,19 @@
#define DUMP_SCHEDULER_TIME_WINDOW 30000 // every 30sec, take a snap shot of task queue.
typedef struct {
- char label[TSDB_LABEL_LEN];
- tsem_t emptySem;
- tsem_t fullSem;
+ char label[TSDB_LABEL_LEN];
+ tsem_t emptySem;
+ tsem_t fullSem;
TdThreadMutex queueMutex;
- int32_t fullSlot;
- int32_t emptySlot;
- int32_t queueSize;
- int32_t numOfThreads;
- TdThread *qthread;
- SSchedMsg *queue;
- bool stop;
- void *pTmrCtrl;
- void *pTimer;
+ int32_t fullSlot;
+ int32_t emptySlot;
+ int32_t queueSize;
+ int32_t numOfThreads;
+ TdThread *qthread;
+ SSchedMsg *queue;
+ bool stop;
+ void *pTmrCtrl;
+ void *pTimer;
} SSchedQueue;
static void *taosProcessSchedQueue(void *param);
@@ -218,7 +218,8 @@ void taosCleanUpScheduler(void *param) {
taosThreadMutexDestroy(&pSched->queueMutex);
if (pSched->pTimer) {
- taosTmrStopA(&pSched->pTimer);
+ taosTmrStop(pSched->pTimer);
+ pSched->pTimer = NULL;
}
if (pSched->queue) taosMemoryFree(pSched->queue);
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 12b678eeae..e86f07620e 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -57,6 +57,8 @@
# ---- mnode
./test.sh -f tsim/mnode/basic1.sim
./test.sh -f tsim/mnode/basic2.sim
+./test.sh -f tsim/mnode/basic3.sim
+./test.sh -f tsim/mnode/basic4.sim
# ---- show
./test.sh -f tsim/show/basic.sim
diff --git a/tests/script/tsim/mnode/basic3.sim b/tests/script/tsim/mnode/basic3.sim
index 3c69e6ed51..edbf429075 100644
--- a/tests/script/tsim/mnode/basic3.sim
+++ b/tests/script/tsim/mnode/basic3.sim
@@ -3,6 +3,10 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
+system sh/cfg.sh -n dnode1 -c transPullupInterval -v 1
+system sh/cfg.sh -n dnode2 -c transPullupInterval -v 1
+system sh/cfg.sh -n dnode3 -c transPullupInterval -v 1
+system sh/cfg.sh -n dnode4 -c transPullupInterval -v 1
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
@@ -18,7 +22,7 @@ $x = 0
step1:
$x = $x + 1
sleep 1000
- if $x == 50 then
+ if $x == 10 then
return -1
endi
sql show dnodes -x step1
@@ -41,7 +45,7 @@ $x = 0
step2:
$x = $x + 1
sleep 1000
- if $x == 50 then
+ if $x == 10 then
return -1
endi
sql show mnodes -x step2
@@ -72,7 +76,7 @@ $x = 0
step4:
$x = $x + 1
sleep 1000
- if $x == 50 then
+ if $x == 10 then
return -1
endi
sql show mnodes -x step4
@@ -102,7 +106,7 @@ $x = 0
step5:
$x = $x + 1
sleep 1000
- if $x == 50 then
+ if $x == 10 then
return -1
endi
sql show mnodes -x step5
@@ -127,7 +131,7 @@ $x = 0
step6:
$x = $x + 1
sleep 1000
- if $x == 50 then
+ if $x == 10 then
return -1
endi
sql show mnodes -x step6
diff --git a/tests/script/tsim/mnode/basic4.sim b/tests/script/tsim/mnode/basic4.sim
new file mode 100644
index 0000000000..2a4a9d3626
--- /dev/null
+++ b/tests/script/tsim/mnode/basic4.sim
@@ -0,0 +1,194 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sql connect
+
+print =============== step1: create dnodes
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+
+$x = 0
+step1:
+ $x = $x + 1
+ sleep 1000
+ if $x == 5 then
+ return -1
+ endi
+sql show dnodes -x step1
+if $data(1)[4] != ready then
+ goto step1
+endi
+if $data(2)[4] != ready then
+ goto step1
+endi
+
+print =============== step2: create mnode 2
+sql create mnode on dnode 2
+sql_error create mnode on dnode 3
+
+system sh/exec.sh -n dnode3 -s start
+
+$x = 0
+step2:
+ $x = $x + 1
+ sleep 1000
+ if $x == 5 then
+ return -1
+ endi
+sql show dnodes -x step2
+if $data(1)[4] != ready then
+ goto step2
+endi
+if $data(2)[4] != ready then
+ goto step2
+endi
+
+system sh/exec.sh -n dnode3 -s stop
+sql_error create mnode on dnode 3
+
+print =============== step3: show mnodes
+
+$x = 0
+step3:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step3
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4]
+
+if $data(1)[2] != LEADER then
+ goto step3
+endi
+if $data(2)[2] != FOLLOWER then
+ goto step3
+endi
+if $data(3)[2] != OFFLINE then
+ goto step3
+endi
+if $data(1)[3] != READY then
+ goto step3
+endi
+if $data(2)[3] != READY then
+ goto step3
+endi
+if $data(3)[3] != CREATING then
+ goto step3
+endi
+
+print =============== step4: start dnode3
+system sh/exec.sh -n dnode3 -s start
+
+$x = 0
+step4:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step4
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4]
+
+if $data(1)[2] != LEADER then
+ goto step4
+endi
+if $data(2)[2] != FOLLOWER then
+ goto step4
+endi
+if $data(3)[2] != FOLLOWER then
+ goto step4
+endi
+if $data(1)[3] != READY then
+ goto step4
+endi
+if $data(2)[3] != READY then
+ goto step4
+endi
+if $data(3)[3] != READY then
+ goto step4
+endi
+
+print =============== step5: drop mnode 3 and stop dnode3
+system sh/exec.sh -n dnode3 -s stop
+sql_error drop mnode on dnode 3
+
+$x = 0
+step5:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step5
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4]
+
+if $data(1)[2] != LEADER then
+ goto step5
+endi
+if $data(2)[2] != FOLLOWER then
+ goto step5
+endi
+if $data(3)[2] != OFFLINE then
+ goto step5
+endi
+if $data(1)[3] != READY then
+ goto step5
+endi
+if $data(2)[3] != READY then
+ goto step5
+endi
+if $data(3)[3] != DROPPING then
+ goto step5
+endi
+
+print =============== step6: start dnode3
+system sh/exec.sh -n dnode3 -s start
+
+$x = 0
+step6:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+sql show mnodes -x step6
+print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
+print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4]
+
+if $rows != 2 then
+ goto step6
+endi
+if $data(1)[2] != LEADER then
+ goto step6
+endi
+if $data(2)[2] != FOLLOWER then
+ goto step6
+endi
+if $data(3)[2] != null then
+ goto step6
+endi
+if $data(1)[3] != READY then
+ goto step6
+endi
+if $data(2)[3] != READY then
+ goto step6
+endi
+if $data(3)[3] != null then
+ goto step6
+endi
+
+system sh/exec.sh -n dnode1 -s stop
+system sh/exec.sh -n dnode2 -s stop
+system sh/exec.sh -n dnode3 -s stop
+system sh/exec.sh -n dnode4 -s stop
\ No newline at end of file
diff --git a/tests/script/tsim/stable/column_add.sim b/tests/script/tsim/stable/column_add.sim
index a5d9b48508..db592e6c69 100644
--- a/tests/script/tsim/stable/column_add.sim
+++ b/tests/script/tsim/stable/column_add.sim
@@ -143,7 +143,7 @@ sql insert into db.ctb values(now+2s, 1, 2, 3, 4)
sql select * from db.stb
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
if $rows != 3 then
return -1
@@ -200,7 +200,6 @@ sql insert into db.ctb values(now+3s, 1, 2, 3, 4, 5)
sql select * from db.stb
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
if $rows != 4 then
diff --git a/tests/script/tsim/stable/tag_add.sim b/tests/script/tsim/stable/tag_add.sim
index 01cc7bc36c..a7615df14c 100644
--- a/tests/script/tsim/stable/tag_add.sim
+++ b/tests/script/tsim/stable/tag_add.sim
@@ -129,7 +129,7 @@ sql select * from db.stb
sql select * from db.stb
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
if $rows != 1 then
return -1
@@ -160,7 +160,7 @@ sql insert into db.ctb2 values(now, 1, "2")
sql select * from db.stb where tbname = 'ctb2';
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
if $rows != 1 then
return -1
diff --git a/tests/script/tsim/stable/tag_drop.sim b/tests/script/tsim/stable/tag_drop.sim
index afac59daff..50907be23e 100644
--- a/tests/script/tsim/stable/tag_drop.sim
+++ b/tests/script/tsim/stable/tag_drop.sim
@@ -155,7 +155,7 @@ sql select * from db.stb
sql select * from db.stb
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
if $rows != 1 then
return -1
@@ -186,7 +186,7 @@ sql insert into db.ctb2 values(now, 1, "2")
sql select * from db.stb where tbname = 'ctb2';
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
if $rows != 1 then
return -1
@@ -219,7 +219,7 @@ sql select * from db.stb where tbname = 'ctb2';
sql select * from db.stb where tbname = 'ctb2';
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
if $rows != 1 then
return -1
@@ -251,7 +251,7 @@ sql insert into db.ctb3 values(now, 1, "2")
sql select * from db.stb where tbname = 'ctb3';
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
if $rows != 1 then
return -1
@@ -313,7 +313,7 @@ endi
sql select * from db.stb where tbname = 'ctb3';
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
-print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4] $data[1][5] $data[1][6]
+print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6]
if $rows != 1 then
return -1
diff --git a/tests/system-test/0-others/cachelast.py b/tests/system-test/0-others/cachelast.py
new file mode 100644
index 0000000000..7e912eda9a
--- /dev/null
+++ b/tests/system-test/0-others/cachelast.py
@@ -0,0 +1,148 @@
+import taos
+import sys ,os ,json ,time
+import datetime
+import inspect
+import subprocess
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
+ "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
+ "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), True)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files or "taosd.exe" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def illegal_params(self):
+
+ illegal_params = ["1","0","NULL","None","False","True" ,"keep","now" ,"*" , "," ,"_" , "abc" ,"keep"]
+
+ for value in illegal_params:
+
+ tdSql.error("create database testdb replica 1 cachelast '%s' " %value)
+
+ unexpected_numbers = [-1 , 0.0 , 3.0 , 4, 10 , 100]
+
+ for number in unexpected_numbers:
+ tdSql.error("create database testdb replica 1 cachelast %s " %number)
+
+
+ def prepare_datas(self):
+ for i in range(4):
+ tdSql.execute("create database test_db_%d replica 1 cachelast %d " %(i,i))
+ tdSql.execute("use test_db_%d"%i)
+ tdSql.execute("create stable st(ts timestamp , c1 int ,c2 float ) tags(ind int) ")
+ tdSql.execute("create table tb1 using st tags(1) ")
+ tdSql.execute("create table tb2 using st tags(2) ")
+
+ for k in range(10):
+ tdSql.execute(" insert into tb1 values(now , %d, %f)" %(k,k*10) )
+ tdSql.execute(" insert into tb2 values(now , %d, %f)" %(k,k*10) )
+
+ def check_cache_last_sets(self):
+
+
+ # check cache_last value for database
+
+ tdSql.query(" show databases ")
+ databases_infos = tdSql.queryResult
+ cache_lasts = {}
+ for db_info in databases_infos:
+ dbname = db_info[0]
+ # print(dbname)
+ cache_last_value = db_info[16]
+ # print(cache_last_value)
+ if dbname in ["information_schema" , "performance_schema"]:
+ continue
+ cache_lasts[dbname]=cache_last_value
+
+
+ # cache_last_set value
+ for k , v in cache_lasts.items():
+
+ if k.split("_")[-1]==str(v):
+ tdLog.info(" database %s cache_last value check pass, value is %d "%(k,v) )
+ else:
+ tdLog.exit(" database %s cache_last value check fail, value is %d "%(k,v) )
+
+ # # check storage layer implementation
+
+
+ # buildPath = self.getBuildPath()
+ # if (buildPath == ""):
+ # tdLog.exit("taosd not found!")
+ # else:
+ # tdLog.info("taosd found in %s" % buildPath)
+ # dataPath = buildPath + "/../sim/dnode1/data"
+ # abs_vnodePath = os.path.abspath(dataPath)+"/vnode/"
+ # tdLog.info("abs_vnodePath: %s" % abs_vnodePath)
+
+ # tdSql.query(" show dnodes ")
+ # dnode_id = tdSql.queryResult[0][0]
+
+ # for dbname in cache_lasts.keys():
+ # print(dbname)
+ # tdSql.execute(" use %s" % dbname)
+ # tdSql.query(" show vgroups ")
+ # vgroups_infos = tdSql.queryResult
+ # for vgroup_info in vgroups_infos:
+ # vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json"
+ # vnode_info_of_db = f"cat {vnode_json}"
+ # vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8")
+ # infoDict = json.loads(vnode_info)
+ # vnode_json_of_dbname = f"{dnode_id}."+ dbname
+ # config = infoDict["config"]
+ # if infoDict["config"]["dbname"] == vnode_json_of_dbname:
+ # if "cachelast" in infoDict["config"]:
+ # if int(infoDict["config"]["cachelast"]) != cache_lasts[dbname]:
+ # tdLog.exit("cachelast value is error in vnode.json of vnode%d "%(vgroup_info[0]))
+ # else:
+ # tdLog.exit("cachelast not found in vnode.json of vnode%d "%(vgroup_info[0]))
+
+ def restart_check_cache_last_sets(self):
+
+ for i in range(3):
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+ tdDnodes.start(index)
+ time.sleep(3)
+ self.check_cache_last_sets()
+
+
+ def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
+
+
+ self.illegal_params()
+ self.prepare_datas()
+ self.check_cache_last_sets()
+ self.restart_check_cache_last_sets()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/db.py b/tests/system-test/7-tmq/db.py
new file mode 100644
index 0000000000..0115686798
--- /dev/null
+++ b/tests/system-test/7-tmq/db.py
@@ -0,0 +1,462 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+from enum import Enum
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+class actionType(Enum):
+ CREATE_DATABASE = 0
+ CREATE_STABLE = 1
+ CREATE_CTABLE = 2
+ INSERT_DATA = 3
+
+class TDTestCase:
+ hostname = socket.gethostname()
+ #rpcDebugFlagVal = '143'
+ #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+ #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+ #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+ #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+ #print ("===================: ", updatecfgDict)
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor())
+ #tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def newcur(self,cfg,host,port):
+ user = "root"
+ password = "taosdata"
+ con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
+ cur=con.cursor()
+ print(cur)
+ return cur
+
+ def initConsumerTable(self,cdbName='cdb'):
+ tdLog.info("create consume database, and consume info table, and consume result table")
+ tdSql.query("drop database if exists %s "%(cdbName))
+ tdSql.query("create database %s vgroups 1"%(cdbName))
+ tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+ tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
+
+ tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+ tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
+
+ def initConsumeContentTable(self,id=0,cdbName='cdb'):
+ tdSql.query("drop table if exists %s.content_%d "%(cdbName, id))
+        tdSql.query("create table %s.content_%d (ts timestamp, contentOfRow binary(1024))"%(cdbName, id))
+
+ def initConsumerInfoTable(self,cdbName='cdb'):
+ tdLog.info("drop consumeinfo table")
+ tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+ tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ sql = "insert into %s.consumeinfo values "%cdbName
+ sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
+ tdLog.info("consume info sql: %s"%sql)
+ tdSql.query(sql)
+
+ def selectConsumeResult(self,expectRows,cdbName='cdb'):
+ resultList=[]
+ while 1:
+ tdSql.query("select * from %s.consumeresult"%cdbName)
+ #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
+ if tdSql.getRows() == expectRows:
+ break
+ else:
+ time.sleep(5)
+
+ for i in range(expectRows):
+ tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
+ resultList.append(tdSql.getData(i , 3))
+
+ return resultList
+
+ def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
+ shellCmd = 'nohup '
+ if valgrind == 1:
+ logFile = cfgPath + '/../log/valgrind-tmq.log'
+ shellCmd = 'nohup valgrind --log-file=' + logFile
+            shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
+
+ shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> /dev/null 2>&1 &"
+ tdLog.info(shellCmd)
+ os.system(shellCmd)
+
+ def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1):
+ if dropFlag == 1:
+ tsql.execute("drop database if exists %s"%(dbName))
+
+ tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica))
+ tdLog.debug("complete to create database %s"%(dbName))
+ return
+
+ def create_stable(self,tsql, dbName,stbName):
+ tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName))
+ tdLog.debug("complete to create %s.%s" %(dbName, stbName))
+ return
+
+ def create_ctables(self,tsql, dbName,stbName,ctbPrefix,ctbNum):
+ tsql.execute("use %s" %dbName)
+ pre_create = "create table"
+ sql = pre_create
+ #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
+ for i in range(ctbNum):
+ sql += " %s_%d using %s tags(%d)"%(ctbPrefix,i,stbName,i+1)
+ if (i > 0) and (i%100 == 0):
+ tsql.execute(sql)
+ sql = pre_create
+ if sql != pre_create:
+ tsql.execute(sql)
+
+ tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+ return
+
+ def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0):
+ tdLog.debug("start to insert data ............")
+ tsql.execute("use %s" %dbName)
+ pre_insert = "insert into "
+ sql = pre_insert
+
+ if startTs == 0:
+ t = time.time()
+ startTs = int(round(t * 1000))
+
+ ctbDict = {}
+ for i in range(ctbNum):
+ ctbDict[i] = 0
+
+ #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+ rowsOfCtb = 0
+ while rowsOfCtb < rowsPerTbl:
+ for i in range(ctbNum):
+ sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
+ for k in range(batchNum):
+ sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i])
+ ctbDict[i] += 1
+ if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl):
+ tsql.execute(sql)
+ sql = "insert into "
+ break
+ rowsOfCtb = ctbDict[0]
+
+ tdLog.debug("insert data ............ [OK]")
+ return
+
+ def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0):
+ tdLog.debug("start to insert data ............")
+ tsql.execute("use %s" %dbName)
+ pre_insert = "insert into "
+ sql = pre_insert
+
+ if startTs == 0:
+ t = time.time()
+ startTs = int(round(t * 1000))
+
+ #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+ rowsOfSql = 0
+ for i in range(ctbNum):
+ sql += " %s_%d values "%(ctbPrefix,i)
+ for j in range(rowsPerTbl):
+ sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
+ rowsOfSql += 1
+ if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+ tsql.execute(sql)
+ rowsOfSql = 0
+ if j < rowsPerTbl - 1:
+ sql = "insert into %s_%d values " %(ctbPrefix,i)
+ else:
+ sql = "insert into "
+ #end sql
+ if sql != pre_insert:
+ #print("insert sql:%s"%sql)
+ tsql.execute(sql)
+ tdLog.debug("insert data ............ [OK]")
+ return
+
+ def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0):
+ tdLog.debug("start to insert data wiht auto create child table ............")
+ tsql.execute("use %s" %dbName)
+ pre_insert = "insert into "
+ sql = pre_insert
+
+ if startTs == 0:
+ t = time.time()
+ startTs = int(round(t * 1000))
+
+ #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+ rowsOfSql = 0
+ for i in range(ctbNum):
+ sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
+ for j in range(rowsPerTbl):
+ sql += "(%d, %d, 'autodata_%d') "%(startTs + j, j, j)
+ rowsOfSql += 1
+ if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+ tsql.execute(sql)
+ rowsOfSql = 0
+ if j < rowsPerTbl - 1:
+ sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i)
+ else:
+ sql = "insert into "
+ #end sql
+ if sql != pre_insert:
+ #print("insert sql:%s"%sql)
+ tsql.execute(sql)
+ tdLog.debug("insert data ............ [OK]")
+ return
+
+ def prepareEnv(self, **parameterDict):
+ # create new connector for my thread
+ tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
+
+ if parameterDict["actionType"] == actionType.CREATE_DATABASE:
+ self.create_database(tsql, parameterDict["dbName"])
+ elif parameterDict["actionType"] == actionType.CREATE_STABLE:
+ self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"])
+ elif parameterDict["actionType"] == actionType.CREATE_CTABLE:
+ self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+ elif parameterDict["actionType"] == actionType.INSERT_DATA:
+ self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\
+ parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+ else:
+ tdLog.exit("not support's action: ", parameterDict["actionType"])
+
+ return
+
+ def tmqCase1(self, cfgPath, buildPath):
+ tdLog.printNoPrefix("======== test case 1: ")
+ '''
+ subscribe one db, multi normal table which have not same schema, and include rows of all tables in one insert sql
+ '''
+ self.initConsumerTable()
+
+ # create and start thread
+ parameterDict = {'cfg': '', \
+ 'actionType': 0, \
+ 'dbName': 'db1', \
+ 'dropFlag': 1, \
+ 'vgroups': 4, \
+ 'replica': 1, \
+ 'stbName': 'stb1', \
+ 'ctbNum': 10, \
+ 'rowsPerTbl': 10000, \
+ 'batchNum': 100, \
+ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+ parameterDict['cfg'] = cfgPath
+
+ self.create_database(tdSql, parameterDict["dbName"])
+ tdSql.execute("create table %s.ntb0 (ts timestamp, c1 int)"%(parameterDict["dbName"]))
+ tdSql.execute("create table %s.ntb1 (ts timestamp, c1 int, c2 float)"%(parameterDict["dbName"]))
+ tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(parameterDict["dbName"]))
+ tdSql.execute("create table %s.ntb3 (ts timestamp, c1 int, c2 float, c3 binary(32), c4 timestamp)"%(parameterDict["dbName"]))
+
+ tdSql.execute("insert into %s.ntb0 values(now, 1) %s.ntb1 values(now, 1, 1) %s.ntb2 values(now, 1, 1, '1') %s.ntb3 values(now, 1, 1, '1', now)"%(parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"]))
+ tdSql.execute("insert into %s.ntb0 values(now, 2)(now+1s, 3) \
+ %s.ntb1 values(now, 2, 2)(now+1s, 3, 3) \
+ %s.ntb2 values(now, 2, 2, '2')(now+1s, 3, 3, '3') \
+ %s.ntb3 values(now, 2, 2, '2', now)(now+1s, 3, 3, '3', now)"\
+ %(parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"]))
+ tdSql.execute("insert into %s.ntb0 values(now, 4)(now+1s, 5) \
+ %s.ntb1 values(now, 4, 4)(now+1s, 5, 5) \
+ %s.ntb2 values(now, 4, 4, '4')(now+1s, 5, 5, '5') \
+ %s.ntb3 values(now, 4, 4, '4', now)(now+1s, 5, 5, '5', now) \
+ %s.ntb0 values(now+2s, 6)(now+3s, 7) \
+ %s.ntb1 values(now+2s, 6, 6)(now+3s, 7, 7) \
+ %s.ntb2 values(now+2s, 6, 6, '6')(now+3s, 7, 7, '7') \
+ %s.ntb3 values(now+2s, 6, 6, '6', now)(now+3s, 7, 7, '7', now)"\
+ %(parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"]))
+ numOfNtb = 4
+ rowsOfPerNtb = 7
+
+ tdLog.info("create topics from db")
+ topicFromDb = 'topic_db_mulit_tbl'
+
+ tdSql.execute("create topic %s as database %s" %(topicFromDb, parameterDict['dbName']))
+ consumerId = 0
+ expectrowcnt = numOfNtb * rowsOfPerNtb
+ topicList = topicFromDb
+ ifcheckdata = 0
+ ifManualCommit = 0
+ keyList = 'group.id:cgrp1,enable.auto.commit:false,\
+ auto.commit.interval.ms:6000,auto.offset.reset:earliest'
+ self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ pollDelay = 10
+ showMsg = 1
+ showRow = 1
+ self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
+
+ tdLog.info("insert process end, and start to check consume result")
+ expectRows = 1
+ resultList = self.selectConsumeResult(expectRows)
+ totalConsumeRows = 0
+ for i in range(expectRows):
+ totalConsumeRows += resultList[i]
+
+ if totalConsumeRows != expectrowcnt:
+ tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
+ tdLog.exit("tmq consume rows error!")
+
+ tdSql.query("drop topic %s"%topicFromDb)
+
+ tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+ def tmqCase2(self, cfgPath, buildPath):
+ tdLog.printNoPrefix("======== test case 2: ")
+ '''
+        subscribe one stb, multi child table and normal table which have not same schema, and include rows of all tables in one insert sql
+ '''
+ self.initConsumerTable()
+
+ # create and start thread
+ parameterDict = {'cfg': '', \
+ 'actionType': 0, \
+ 'dbName': 'db2', \
+ 'dropFlag': 1, \
+ 'vgroups': 4, \
+ 'replica': 1, \
+ 'stbName': 'stb1', \
+ 'ctbNum': 10, \
+ 'rowsPerTbl': 10000, \
+ 'batchNum': 100, \
+ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+ parameterDict['cfg'] = cfgPath
+
+ dbName = parameterDict["dbName"]
+
+ self.create_database(tdSql, dbName)
+
+ tdSql.execute("create stable %s.stb (ts timestamp, s1 bigint, s2 binary(32), s3 double) tags (t1 int, t2 binary(32))"%(dbName))
+ tdSql.execute("create table %s.ctb0 using %s.stb tags(0, 'ctb0')"%(dbName,dbName))
+ tdSql.execute("create table %s.ctb1 using %s.stb tags(1, 'ctb1')"%(dbName,dbName))
+
+ tdSql.execute("create table %s.ntb0 (ts timestamp, c1 binary(32))"%(dbName))
+ tdSql.execute("create table %s.ntb1 (ts timestamp, c1 binary(32), c2 float)"%(dbName))
+ tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(dbName))
+ tdSql.execute("create table %s.ntb3 (ts timestamp, c1 int, c2 float, c3 binary(32), c4 timestamp)"%(dbName))
+
+ tdSql.execute("insert into %s.ntb0 values(now, 'ntb0-11') \
+ %s.ntb1 values(now, 'ntb1', 11) \
+ %s.ntb2 values(now, 11, 11, 'ntb2') \
+ %s.ctb0 values(now, 11, 'ctb0', 11) \
+ %s.ntb3 values(now, 11, 11, 'ntb3', now) \
+ %s.ctb1 values(now, 11, 'ctb1', 11)"\
+ %(dbName,dbName,dbName,dbName,dbName,dbName))
+
+ tdSql.execute("insert into %s.ntb0 values(now, 'ntb0-12')(now+1s, 'ntb0-13') \
+ %s.ntb1 values(now, 'ntb1', 12)(now+1s, 'ntb1', 13) \
+ %s.ntb2 values(now, 12, 12, 'ntb2')(now+1s, 13, 13, 'ntb2') \
+ %s.ctb0 values(now, 12, 'ctb0', 12)(now+1s, 13, 'ctb0', 13) \
+ %s.ntb3 values(now, 12, 12, 'ntb3', now)(now+1s, 13, 13, 'ntb3', now) \
+ %s.ctb1 values(now, 12, 'ctb1', 12)(now+1s, 13, 'ctb1', 13)"\
+ %(dbName,dbName,dbName,dbName,dbName,dbName))
+ tdSql.execute("insert into %s.ntb0 values(now, 'ntb0-14')(now+1s, 'ntb0-15') \
+ %s.ntb1 values(now, 'ntb1', 14)(now+1s, 'ntb1', 15) \
+ %s.ntb2 values(now, 14, 14, 'ntb2')(now+1s, 15, 15, 'ntb2') \
+ %s.ctb0 values(now, 14, 'ctb0', 14)(now+1s, 15, 'ctb0', 15) \
+ %s.ntb3 values(now, 14, 14, 'ntb3', now)(now+1s, 15, 15, 'ntb3', now) \
+ %s.ctb1 values(now, 14, 'ctb1', 14)(now+1s, 15, 'ctb1', 15) \
+ %s.ntb0 values(now+2s, 'ntb0-16')(now+3s, 'ntb0-17') \
+ %s.ntb1 values(now+2s, 'ntb1', 16)(now+3s, 'ntb1', 17) \
+ %s.ntb2 values(now+2s, 16, 16, 'ntb2')(now+3s, 17, 17, 'ntb2') \
+ %s.ctb0 values(now+2s, 16, 'ctb0', 16)(now+3s, 17, 'ctb0', 17) \
+ %s.ntb3 values(now+2s, 16, 16, 'ntb3', now)(now+3s, 17, 17, 'ntb3', now) \
+ %s.ctb1 values(now+2s, 16, 'ctb1', 16)(now+3s, 17, 'ctb1', 17)"\
+ %(dbName,dbName,dbName,dbName,dbName,dbName,dbName,dbName,dbName,dbName,dbName,dbName))
+ numOfNtb = 4
+ numOfCtb = 2
+ rowsOfPerNtb = 7
+
+ tdLog.info("create topics from db")
+ topicFromStb = 'topic_stb_mulit_tbl'
+
+ tdSql.execute("create topic %s as stable %s.stb" %(topicFromStb, dbName))
+ consumerId = 0
+ expectrowcnt = numOfCtb * rowsOfPerNtb
+ topicList = topicFromStb
+ ifcheckdata = 0
+ ifManualCommit = 0
+ keyList = 'group.id:cgrp1,enable.auto.commit:false,\
+ auto.commit.interval.ms:6000,auto.offset.reset:earliest'
+ self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ pollDelay = 10
+ showMsg = 1
+ showRow = 1
+ self.startTmqSimProcess(buildPath,cfgPath,pollDelay,dbName,showMsg, showRow)
+
+ tdLog.info("insert process end, and start to check consume result")
+ expectRows = 1
+ resultList = self.selectConsumeResult(expectRows)
+ totalConsumeRows = 0
+ for i in range(expectRows):
+ totalConsumeRows += resultList[i]
+
+ if totalConsumeRows != expectrowcnt:
+ tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
+ tdLog.exit("tmq consume rows error!")
+
+ tdSql.query("drop topic %s"%topicFromStb)
+
+ tdLog.printNoPrefix("======== test case 2 end ...... ")
+
+ def tmqCase3(self, cfgPath, buildPath):
+ tdLog.printNoPrefix("======== test case 3: ")
+
+ tdLog.printNoPrefix("======== test case 3 end ...... ")
+
+
+ def run(self):
+ tdSql.prepare()
+
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ cfgPath = buildPath + "/../sim/psim/cfg"
+ tdLog.info("cfgPath: %s" % cfgPath)
+
+ # self.tmqCase1(cfgPath, buildPath)
+ self.tmqCase2(cfgPath, buildPath)
+ # self.tmqCase3(cfgPath, buildPath)
+ # self.tmqCase4(cfgPath, buildPath)
+ # self.tmqCase5(cfgPath, buildPath)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/schema.py b/tests/system-test/7-tmq/schema.py
new file mode 100644
index 0000000000..633a097db6
--- /dev/null
+++ b/tests/system-test/7-tmq/schema.py
@@ -0,0 +1,700 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+from enum import Enum
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+
+class actionType(Enum):
+ CREATE_DATABASE = 0
+ CREATE_STABLE = 1
+ CREATE_CTABLE = 2
+ INSERT_DATA = 3
+
+class TDTestCase:
+ hostname = socket.gethostname()
+ #rpcDebugFlagVal = '143'
+ #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+ #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+ #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''}
+ #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal
+ #print ("===================: ", updatecfgDict)
+
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor())
+ #tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
+ def newcur(self,cfg,host,port):
+ user = "root"
+ password = "taosdata"
+ con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port)
+ cur=con.cursor()
+ print(cur)
+ return cur
+
+ def initConsumerTable(self,cdbName='cdb'):
+ tdLog.info("create consume database, and consume info table, and consume result table")
+ tdSql.query("drop database if exists %s "%(cdbName))
+ tdSql.query("create database %s vgroups 1"%(cdbName))
+ tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+ tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
+
+ tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+ tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
+
+ def initConsumeContentTable(self,id=0,cdbName='cdb'):
+ tdSql.query("drop table if exists %s.content_%d "%(cdbName, id))
+ tdSql.query("create table %s.content_%d (ts timestamp, contentOfRow binary(1024))"%cdbName, id)
+
+ def initConsumerInfoTable(self,cdbName='cdb'):
+ tdLog.info("drop consumeinfo table")
+ tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
+ tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
+
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
+ sql = "insert into %s.consumeinfo values "%cdbName
+ sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
+ tdLog.info("consume info sql: %s"%sql)
+ tdSql.query(sql)
+
+ def selectConsumeResult(self,expectRows,cdbName='cdb'):
+ resultList=[]
+ while 1:
+ tdSql.query("select * from %s.consumeresult"%cdbName)
+ #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
+ if tdSql.getRows() == expectRows:
+ break
+ else:
+ time.sleep(5)
+
+ for i in range(expectRows):
+ tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
+ resultList.append(tdSql.getData(i , 3))
+
+ return resultList
+
+ def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
+ shellCmd = 'nohup '
+ if valgrind == 1:
+ logFile = cfgPath + '/../log/valgrind-tmq.log'
+ shellCmd = 'nohup valgrind --log-file=' + logFile
+            shellCmd += ' --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
+
+ shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath
+ shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+ shellCmd += "> /dev/null 2>&1 &"
+ tdLog.info(shellCmd)
+ os.system(shellCmd)
+
+ def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1):
+ if dropFlag == 1:
+ tsql.execute("drop database if exists %s"%(dbName))
+
+ tsql.execute("create database if not exists %s vgroups %d replica %d"%(dbName, vgroups, replica))
+ tdLog.debug("complete to create database %s"%(dbName))
+ return
+
+ def create_stable(self,tsql, dbName,stbName):
+ tsql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(dbName, stbName))
+ tdLog.debug("complete to create %s.%s" %(dbName, stbName))
+ return
+
+ def create_ctables(self,tsql, dbName,stbName,ctbPrefix,ctbNum):
+ tsql.execute("use %s" %dbName)
+ pre_create = "create table"
+ sql = pre_create
+ #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))
+ for i in range(ctbNum):
+ sql += " %s_%d using %s tags(%d)"%(ctbPrefix,i,stbName,i+1)
+ if (i > 0) and (i%100 == 0):
+ tsql.execute(sql)
+ sql = pre_create
+ if sql != pre_create:
+ tsql.execute(sql)
+
+ tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
+ return
+
+ def insert_data_interlaceByMultiTbl(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0):
+ tdLog.debug("start to insert data ............")
+ tsql.execute("use %s" %dbName)
+ pre_insert = "insert into "
+ sql = pre_insert
+
+ if startTs == 0:
+ t = time.time()
+ startTs = int(round(t * 1000))
+
+ ctbDict = {}
+ for i in range(ctbNum):
+ ctbDict[i] = 0
+
+ #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+ rowsOfCtb = 0
+ while rowsOfCtb < rowsPerTbl:
+ for i in range(ctbNum):
+ sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
+ for k in range(batchNum):
+ sql += "(%d, %d, 'tmqrow_%d') "%(startTs + ctbDict[i], ctbDict[i], ctbDict[i])
+ ctbDict[i] += 1
+ if (0 == ctbDict[i]%batchNum) or (ctbDict[i] == rowsPerTbl):
+ tsql.execute(sql)
+ sql = "insert into "
+ break
+ rowsOfCtb = ctbDict[0]
+
+ tdLog.debug("insert data ............ [OK]")
+ return
+
+ def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0):
+ tdLog.debug("start to insert data ............")
+ tsql.execute("use %s" %dbName)
+ pre_insert = "insert into "
+ sql = pre_insert
+
+ if startTs == 0:
+ t = time.time()
+ startTs = int(round(t * 1000))
+
+ #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+ rowsOfSql = 0
+ for i in range(ctbNum):
+ sql += " %s_%d values "%(ctbPrefix,i)
+ for j in range(rowsPerTbl):
+ sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
+ rowsOfSql += 1
+ if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+ tsql.execute(sql)
+ rowsOfSql = 0
+ if j < rowsPerTbl - 1:
+ sql = "insert into %s_%d values " %(ctbPrefix,i)
+ else:
+ sql = "insert into "
+ #end sql
+ if sql != pre_insert:
+ #print("insert sql:%s"%sql)
+ tsql.execute(sql)
+ tdLog.debug("insert data ............ [OK]")
+ return
+
+ def insert_data_with_autoCreateTbl(self,tsql,dbName,stbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs=0):
+ tdLog.debug("start to insert data wiht auto create child table ............")
+ tsql.execute("use %s" %dbName)
+ pre_insert = "insert into "
+ sql = pre_insert
+
+ if startTs == 0:
+ t = time.time()
+ startTs = int(round(t * 1000))
+
+ #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
+ rowsOfSql = 0
+ for i in range(ctbNum):
+ sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
+ for j in range(rowsPerTbl):
+ sql += "(%d, %d, 'autodata_%d') "%(startTs + j, j, j)
+ rowsOfSql += 1
+ if (j > 0) and ((rowsOfSql == batchNum) or (j == rowsPerTbl - 1)):
+ tsql.execute(sql)
+ rowsOfSql = 0
+ if j < rowsPerTbl - 1:
+ sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i,dbName,stbName,i)
+ else:
+ sql = "insert into "
+ #end sql
+ if sql != pre_insert:
+ #print("insert sql:%s"%sql)
+ tsql.execute(sql)
+ tdLog.debug("insert data ............ [OK]")
+ return
+
+ def prepareEnv(self, **parameterDict):
+ # create new connector for my thread
+ tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
+
+ if parameterDict["actionType"] == actionType.CREATE_DATABASE:
+ self.create_database(tsql, parameterDict["dbName"])
+ elif parameterDict["actionType"] == actionType.CREATE_STABLE:
+ self.create_stable(tsql, parameterDict["dbName"], parameterDict["stbName"])
+ elif parameterDict["actionType"] == actionType.CREATE_CTABLE:
+ self.create_ctables(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+ elif parameterDict["actionType"] == actionType.INSERT_DATA:
+ self.insert_data(tsql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"],\
+ parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+ else:
+            tdLog.exit("not support's action: %s"%parameterDict["actionType"])
+
+ return
+
+ def tmqCase1(self, cfgPath, buildPath):
+ tdLog.printNoPrefix("======== test case 1: ")
+ parameterDict = {'cfg': '', \
+ 'actionType': 0, \
+ 'dbName': 'db1', \
+ 'dropFlag': 1, \
+ 'vgroups': 4, \
+ 'replica': 1, \
+ 'stbName': 'stb1', \
+ 'ctbPrefix': 'stb1', \
+ 'ctbNum': 10, \
+ 'rowsPerTbl': 10000, \
+ 'batchNum': 23, \
+ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+ parameterDict['cfg'] = cfgPath
+
+ tdLog.info("create database, super table, child table, normal table")
+ ntbName = 'ntb1'
+ self.create_database(tdSql, parameterDict["dbName"])
+ # self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"])
+ # self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"])
+ # self.insert_data(tdSql,parameterDict["dbName"],parameterDict["stbName"],parameterDict["ctbNum"],parameterDict["rowsPerTbl"],parameterDict["batchNum"])
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10))"%(parameterDict["dbName"],ntbName))
+
+ tdLog.info("create topics from super table and normal table")
+ columnTopicFromStb = 'column_topic_from_stb1'
+ columnTopicFromNtb = 'column_topic_from_ntb1'
+
+ tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
+
+        tdLog.info("======== super table test:")
+ # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+ tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
+ tdSql.query("alter table %s.%s modify column c4 binary(60)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s modify tag t4 binary(60)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s drop column c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop column c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s add column c3 int"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s add column c4 float"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s add tag t3 int"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s add tag t4 float"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdLog.info("======== normal table test:")
+ # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+ tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], ntbName))
+
+ tdSql.error("alter table %s.%s modify column c2 binary(60)"%(parameterDict['dbName'], ntbName))
+
+ tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], ntbName))
+
+ # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
+ tdSql.query("alter table %s.%s modify column c4 binary(60)"%(parameterDict['dbName'], ntbName))
+
+ tdSql.query("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], ntbName))
+ tdSql.query("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], ntbName))
+
+ tdSql.query("alter table %s.%s drop column c3new"%(parameterDict['dbName'], ntbName))
+ tdSql.query("alter table %s.%s drop column c4new"%(parameterDict['dbName'], ntbName))
+
+ tdSql.query("alter table %s.%s add column c3 int"%(parameterDict['dbName'], ntbName))
+ tdSql.query("alter table %s.%s add column c4 float"%(parameterDict['dbName'], ntbName))
+
+ tdLog.info("======== child table test:")
+ parameterDict['stbName'] = 'stb12'
+ ctbName = 'stb12_0'
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
+ tdSql.query("create table %s.%s using %s.%s tags (1, '2', 3, '4', '5')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
+
+ tdLog.info("create topics from child table")
+ columnTopicFromCtb = 'column_topic_from_ctb1'
+
+ tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s" %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
+
+ # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+ tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s set tag t1 10"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s set tag t2 '20'"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
+ tdSql.query("alter table %s.%s modify column c4 binary(60)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s set tag t3 30"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s set tag t4 '40'"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s set tag t5 '50'"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s drop column c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop column c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s add column c3 int"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s add column c4 float"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s add tag t3 int"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s add tag t4 float"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdLog.printNoPrefix("======== test case 1 end ...... ")
+
+ def tmqCase2(self, cfgPath, buildPath):
+ tdLog.printNoPrefix("======== test case 2: ")
+ parameterDict = {'cfg': '', \
+ 'actionType': 0, \
+ 'dbName': 'db1', \
+ 'dropFlag': 1, \
+ 'vgroups': 4, \
+ 'replica': 1, \
+ 'stbName': 'stb2', \
+ 'ctbPrefix': 'stb2', \
+ 'ctbNum': 10, \
+ 'rowsPerTbl': 10000, \
+ 'batchNum': 23, \
+ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+ parameterDict['cfg'] = cfgPath
+
+ # tdLog.info("create database, super table, child table, normal table")
+ ntbName = 'ntb2'
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10))"%(parameterDict["dbName"],ntbName))
+
+ tdLog.info("create topics from super table and normal table")
+ columnTopicFromStb = 'column_topic_from_stb2'
+ columnTopicFromNtb = 'column_topic_from_ntb2'
+
+ tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s where c3 > 3 and c4 like 'abc' and t3 = 5 and t4 = 'beijing'" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s where c3 > 3 and c4 like 'abc'" %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
+
+        tdLog.info("======== super table test:")
+ # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+ tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
+ tdSql.query("alter table %s.%s modify column c5 nchar(60)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s modify tag t5 nchar(60)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s drop column c5new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s add column c5 int"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s add tag t5 float"%(parameterDict['dbName'], parameterDict['stbName']))
+
+        tdLog.info("======== normal table test:")
+ # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+ tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], ntbName))
+
+ tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], ntbName))
+
+ tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], ntbName))
+
+ # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
+ tdSql.query("alter table %s.%s modify column c5 nchar(60)"%(parameterDict['dbName'], ntbName))
+
+ tdSql.query("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], ntbName))
+
+ tdSql.query("alter table %s.%s drop column c5new"%(parameterDict['dbName'], ntbName))
+
+ tdSql.query("alter table %s.%s add column c5 float"%(parameterDict['dbName'], ntbName))
+
+ tdLog.info("======== child table test:")
+ parameterDict['stbName'] = 'stb21'
+ ctbName = 'stb21_0'
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
+ tdSql.query("create table %s.%s using %s.%s tags (1, '2', 3, '4', '5')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
+
+ tdLog.info("create topics from child table")
+ columnTopicFromCtb = 'column_topic_from_ctb2'
+
+ tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s where c3 > 3 and c4 like 'abc' and t3 = 5 and t4 = 'beijing'" %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
+
+ # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+ tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s set tag t1 11"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s set tag t2 '22'"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s set tag t3 33"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s set tag t4 '44'"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
+ tdSql.query("alter table %s.%s modify column c5 nchar(60)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s modify tag t5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s set tag t5 '50'"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s drop column c5new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.query("alter table %s.%s add column c5 float"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s add tag t5 float"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdLog.printNoPrefix("======== test case 2 end ...... ")
+
+ def tmqCase3(self, cfgPath, buildPath):
+ tdLog.printNoPrefix("======== test case 3: ")
+ parameterDict = {'cfg': '', \
+ 'actionType': 0, \
+ 'dbName': 'db1', \
+ 'dropFlag': 1, \
+ 'vgroups': 4, \
+ 'replica': 1, \
+ 'stbName': 'stb3', \
+ 'ctbPrefix': 'stb3', \
+ 'ctbNum': 10, \
+ 'rowsPerTbl': 10000, \
+ 'batchNum': 23, \
+ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000
+ parameterDict['cfg'] = cfgPath
+
+ # tdLog.info("create database, super table, child table, normal table")
+ ntbName = 'ntb3'
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10))"%(parameterDict["dbName"],ntbName))
+
+ tdLog.info("create topics from super table and normal table")
+ columnTopicFromStb = 'star_topic_from_stb3'
+ columnTopicFromNtb = 'star_topic_from_ntb3'
+
+ tdSql.execute("create topic %s as select * from %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.execute("create topic %s as select * from %s.%s " %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
+
+        tdLog.info("======== super table test:")
+ # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+ tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t5"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify column c5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
+ tdSql.query("alter table %s.%s add column c6 int"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tsLog.info("======== normal table test:")
+ # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+ tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], ntbName))
+
+ tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s modify column c5 nchar(40)"%(parameterDict['dbName'], ntbName))
+
+ tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], ntbName))
+ tdSql.error("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], ntbName))
+
+ # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
+ tdSql.query("alter table %s.%s add column c6 float"%(parameterDict['dbName'], ntbName))
+
+ tdLog.info("======== child table test:")
+ parameterDict['stbName'] = 'stb31'
+ ctbName = 'stb31_0'
+ tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
+ tdSql.query("create table %s.%s using %s.%s tags (10, 100, '1000')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
+
+ tdLog.info("create topics from child table")
+ columnTopicFromCtb = 'column_topic_from_ctb3'
+
+ tdSql.execute("create topic %s as select * from %s.%s " %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
+
+ # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
+ tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s drop tag t5"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify column c4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify column c5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t4 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s modify tag t5 nchar(40)"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s set tag t1 10"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s set tag t2 '20'"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s set tag t3 30"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s set tag t4 '40'"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s set tag t5 '50'"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c4 c4new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename column c5 c5new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t1 t1new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t2 t2new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t3 t3new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t4 t4new"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.error("alter table %s.%s rename tag t5 t5new"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
+ tdSql.query("alter table %s.%s add column c6 float"%(parameterDict['dbName'], parameterDict['stbName']))
+ tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName']))
+
+ tdLog.printNoPrefix("======== test case 3 end ...... ")
+
+ def run(self):
+ tdSql.prepare()
+
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ cfgPath = buildPath + "/../sim/psim/cfg"
+ tdLog.info("cfgPath: %s" % cfgPath)
+
+ self.tmqCase1(cfgPath, buildPath)
+ self.tmqCase2(cfgPath, buildPath)
+ self.tmqCase3(cfgPath, buildPath)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+# Module-level synchronization primitive shared by the consumer threads above.
+event = threading.Event()
+
+# Register this test case with the framework for both Linux and Windows runs.
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index 517febd195..45e25032cf 100644
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -10,6 +10,7 @@ python3 ./test.py -f 0-others/taosdMonitor.py
python3 ./test.py -f 0-others/udfTest.py
python3 ./test.py -f 0-others/udf_create.py
python3 ./test.py -f 0-others/udf_restart_taosd.py
+python3 ./test.py -f 0-others/cachelast.py
python3 ./test.py -f 0-others/user_control.py
python3 ./test.py -f 0-others/fsync.py