diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx
index 9076f163be..5dbeb31a23 100644
--- a/docs/en/20-third-party/01-grafana.mdx
+++ b/docs/en/20-third-party/01-grafana.mdx
@@ -31,38 +31,41 @@ TDengine currently supports Grafana versions 7.5 and above. Users can go to the
### Install Grafana Plugin and Configure Data Source
-
+
-Set the url and authorization environment variables by `export` or a [`.env`(dotenv) file](https://hexdocs.pm/dotenvy/dotenv-file-format.html):
+With Grafana 8 and above, the plugin catalog lets you [browse and manage plugins within Grafana](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) (for Grafana 7.x, use **With Script** or **Install & Configure Manually** instead). Open the **Configurations > Plugins** page, search for **TDengine**, and click the plugin to install it.

-```sh
-export TDENGINE_API=http://tdengine.local:6041
-# user + password
-export TDENGINE_USER=user
-export TDENGINE_PASSWORD=password
+![Search tdengine in grafana plugins](./grafana/grafana-plugin-search-tdengine.png)

-# Other useful variables
-# - If to install TDengine data source, default is true
-export TDENGINE_DS_ENABLED=false
-# - Data source name to be created, default is TDengine
-export TDENGINE_DS_NAME=TDengine
-# - Data source organization id, default is 1
-export GF_ORG_ID=1
-# - Data source is editable in admin ui or not, default is 0 (false)
-export TDENGINE_EDITABLE=1
-```
+Installation may take a few minutes; afterwards you can **Create a TDengine data source**:

-Run `install.sh`:
+![Install and configure Grafana data source](./grafana/grafana-install-and-config.png)

-```sh
-bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"
-```
+Then add a TDengine data source by filling in the configuration options.

-With this script, TDengine data source plugin and the Grafana data source will be installed and created automatically with Grafana provisioning configurations. Save the script and type `./install.sh --help` for the full usage of the script.
+![TDengine Database Grafana plugin add data source](./grafana/grafana-data-source.png)

-And then, restart Grafana service and open Grafana in web-browser, usually .
+You can now create dashboards with TDengine.
+
+
+On a server with Grafana installed, running `install.sh` with the TDengine URL and username/password installs the TDengine data source plugin and adds a data source named TDengine. This is the recommended way for Grafana 7.x or [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) users.
+
+```sh
+bash -c "$(curl -fsSL \
+  https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
+  -a http://localhost:6041 \
+  -u root \
+  -p taosdata
+```
+
+Restart the Grafana service and open Grafana in a web browser, usually at http://localhost:3000.
+
+Save the script and run `./install.sh --help` for the full usage of the script.
+
+
+
Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
@@ -115,6 +118,73 @@ Click `Save & Test` to test. 
You should see a success message if the test worked

![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp)
+
+
+
+Please refer to [Install plugins in the Docker container](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). This installs the `tdengine-datasource` plugin when the Grafana container starts:
+
+```bash
+docker run -d \
+  -p 3000:3000 \
+  --name=grafana \
+  -e "GF_INSTALL_PLUGINS=tdengine-datasource" \
+  grafana/grafana
+```
+
+You can set up a zero-configuration stack for TDengine + Grafana with [docker-compose](https://docs.docker.com/compose/) and a [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:
+
+1. Save the provisioning configuration file to `tdengine.yml`.
+
+   ```yml
+   apiVersion: 1
+   datasources:
+     - name: TDengine
+       type: tdengine-datasource
+       orgId: 1
+       url: "$TDENGINE_API"
+       isDefault: true
+       secureJsonData:
+         url: "$TDENGINE_URL"
+         basicAuth: "$TDENGINE_BASIC_AUTH"
+         token: "$TDENGINE_CLOUD_TOKEN"
+       version: 1
+       editable: true
+   ```
+
+2. Write `docker-compose.yml` with the [TDengine](https://hub.docker.com/r/tdengine/tdengine) and [Grafana](https://hub.docker.com/r/grafana/grafana) images.
+
+   ```yml
+   version: "3.7"
+
+   services:
+     tdengine:
+       image: tdengine/tdengine:2.6.0.2
+       environment:
+         TAOS_FQDN: tdengine
+       volumes:
+         - tdengine-data:/var/lib/taos/
+     grafana:
+       image: grafana/grafana:8.5.6
+       volumes:
+         - ./tdengine.yml:/etc/grafana/provisioning/datasources/tdengine.yml
+         - grafana-data:/var/lib/grafana
+       environment:
+         # install tdengine plugin at start
+         GF_INSTALL_PLUGINS: "tdengine-datasource"
+         TDENGINE_URL: "http://tdengine:6041"
+         # printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64
+         TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
+       ports:
+         - 3000:3000
+   volumes:
+     grafana-data:
+     tdengine-data:
+   ```
+
+3. Start TDengine and Grafana services: `docker-compose up -d`.
+
+Open Grafana at http://localhost:3000, and you can add dashboards with TDengine now. 
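+
+To verify that the plugin actually landed in the container (a quick check, assuming the container name `grafana` from the `docker run` command above), you can list the installed plugins with `grafana-cli`:
+
+```bash
+# tdengine-datasource should appear among the installed plugins
+docker exec grafana grafana-cli plugins ls
+```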
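+
+Note that the `TDENGINE_BASIC_AUTH` value in the compose file is just the Base64 encoding of `user:password`, as the inline comment there suggests. For example, with the default `root`/`taosdata` credentials used elsewhere in this guide:
+
+```bash
+# prints cm9vdDp0YW9zZGF0YQ==
+printf "root:taosdata" | base64
+```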
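+
+Whichever installation path you choose, the data source reaches TDengine over its REST interface, so it can help to verify that endpoint before testing in Grafana (a minimal check, assuming the default `root`/`taosdata` credentials and the REST port 6041 used in the examples above):
+
+```bash
+# a JSON result set means the REST endpoint is reachable
+curl -u root:taosdata -d "show databases" http://localhost:6041/rest/sql
+```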
+
diff --git a/docs/en/20-third-party/grafana/grafana-data-source.png b/docs/en/20-third-party/grafana/grafana-data-source.png
new file mode 100644
index 0000000000..989ffcca0b
Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-data-source.png differ
diff --git a/docs/en/20-third-party/grafana/grafana-install-and-config.png b/docs/en/20-third-party/grafana/grafana-install-and-config.png
new file mode 100644
index 0000000000..b918da8b2d
Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-install-and-config.png differ
diff --git a/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png b/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png
new file mode 100644
index 0000000000..cf3b66977b
Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png differ
diff --git a/docs/zh/20-third-party/01-grafana.mdx b/docs/zh/20-third-party/01-grafana.mdx
index 09c0d786cf..93090ffd38 100644
--- a/docs/zh/20-third-party/01-grafana.mdx
+++ b/docs/zh/20-third-party/01-grafana.mdx
@@ -29,39 +29,41 @@ TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/
### Install the Grafana Plugin and Configure the Data Source
+
+
+With the latest Grafana version (8.5+), you can [browse and manage plugins within Grafana](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) (for 7.x, use the **With Script** or **Install & Configure Manually** tab instead). On the **Configurations > Plugins** page of the Grafana admin UI, search for TDengine and follow the prompts to install it.
+
+![Search tdengine in grafana plugins](grafana-plugin-search-tdengine.png)
+
+Once the plugin is installed as shown, follow the **Create a TDengine data source** prompt to add the data source.
+
+![Install and configure Grafana data source](grafana-install-and-config.png)
+
+Enter the TDengine connection settings to complete the data source configuration.
+
+![TDengine Database Grafana plugin add data source](./grafana-data-source.png)
+
+With configuration complete, you can now create dashboards with TDengine.
+
+
-Set the cluster information as environment variables; a `.env` file also works, see [dotenv](https://hexdocs.pm/dotenvy/dotenv-file-format.html):
+For users on Grafana 7.x, or users managing Grafana with [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/), you can run the installation script on the Grafana server to automatically install the plugin and add the data source provisioning configuration file.

```sh
-export TDENGINE_API=http://tdengine.local:6041
-# user + password
-export TDENGINE_USER=user
-export TDENGINE_PASSWORD=password
-
-# Other environment variables:
-# - whether to install the data source; default is true (install)
-export TDENGINE_DS_ENABLED=false
-# - data source name; default is TDengine
-export TDENGINE_DS_NAME=TDengine
-# - organization ID of the data source; default is 1
-export GF_ORG_ID=1
-# - whether the data source is editable in the admin panel; default is 0 (not editable)
-export TDENGINE_EDITABLE=1
+bash -c "$(curl -fsSL \
+  https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
+  -a http://localhost:6041 \
+  -u root \
+  -p taosdata
```

-Run the installation script:
-
-```sh
-bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"
-```
-
-This script automatically installs the Grafana plugin and configures the data source. After installation, the Grafana service needs to be restarted to take effect.
+After installation, the Grafana service must be restarted before the changes take effect.

Save the script and run `./install.sh --help` to see detailed help.

-
+
Use the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to perform the plugin [installation](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation).
@@ -113,6 +115,73 @@ GF_INSTALL_PLUGINS=tdengine-datasource

![TDengine Database Grafana plugin add data source](./add_datasource4.webp)
+
+
+
+See [Install plugins in the Docker container](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). Use the following command to start a container and automatically install the TDengine plugin:
+
+```bash
+docker run -d \
+  -p 3000:3000 \
+  --name=grafana \
+  -e "GF_INSTALL_PLUGINS=tdengine-datasource" \
+  grafana/grafana
+```
+
+With docker-compose and a Grafana Provisioning configuration file, you can bring up a zero-configuration TDengine + Grafana stack:
+
+1. Save this file as `tdengine.yml`.
+
+   ```yml
+   apiVersion: 1
+   datasources:
+     - name: TDengine
+       type: tdengine-datasource
+       orgId: 1
+       url: "$TDENGINE_API"
+       isDefault: true
+       secureJsonData:
+         url: "$TDENGINE_URL"
+         basicAuth: "$TDENGINE_BASIC_AUTH"
+         token: "$TDENGINE_CLOUD_TOKEN"
+       version: 1
+       editable: true
+   ```
+
+2. Save this file as `docker-compose.yml`.
+
+   ```yml
+   version: "3.7"
+
+   services:
+     tdengine:
+       image: tdengine/tdengine:2.6.0.2
+       environment:
+         TAOS_FQDN: tdengine
+       volumes:
+         - tdengine-data:/var/lib/taos/
+     grafana:
+       image: grafana/grafana:8.5.6
+       volumes:
+         - ./tdengine.yml:/etc/grafana/provisioning/datasources/tdengine.yml
+         - grafana-data:/var/lib/grafana
+       environment:
+         # install tdengine plugin at start
+         GF_INSTALL_PLUGINS: "tdengine-datasource"
+         TDENGINE_URL: "http://tdengine:6041"
+         # printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64
+         TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
+       ports:
+         - 3000:3000
+   volumes:
+     grafana-data:
+     tdengine-data:
+   ```
+
+3. Start TDengine and Grafana with the docker-compose command: `docker-compose up -d`.
+
+Open Grafana at http://localhost:3000, and you can now add dashboards.
+
diff --git a/docs/zh/20-third-party/grafana-data-source.png b/docs/zh/20-third-party/grafana-data-source.png
new file mode 100644
index 0000000000..989ffcca0b
Binary files /dev/null and b/docs/zh/20-third-party/grafana-data-source.png differ
diff --git a/docs/zh/20-third-party/grafana-install-and-config.png b/docs/zh/20-third-party/grafana-install-and-config.png
new file mode 100644
index 0000000000..b918da8b2d
Binary files /dev/null and b/docs/zh/20-third-party/grafana-install-and-config.png differ
diff --git a/docs/zh/20-third-party/grafana-plugin-search-tdengine.png b/docs/zh/20-third-party/grafana-plugin-search-tdengine.png
new file mode 100644
index 0000000000..cf3b66977b
Binary files /dev/null and b/docs/zh/20-third-party/grafana-plugin-search-tdengine.png differ
diff --git a/examples/c/stream_demo.c b/examples/c/stream_demo.c
index ab59fa5e47..5a141867e7 100644
--- a/examples/c/stream_demo.c
+++ b/examples/c/stream_demo.c
@@ -32,6 +32,13 @@ int32_t init_env() {
  }
  taos_free_result(pRes);

+  pRes = taos_query(pConn, "create database if not exists abc2 vgroups 20");
+  if (taos_errno(pRes) != 0) {
+    printf("error in create db, reason:%s\n", taos_errstr(pRes));
+    return -1;
+  }
+  taos_free_result(pRes);
+
  pRes = taos_query(pConn, "use abc1");
  if (taos_errno(pRes) != 0) {
    printf("error in use db, reason:%s\n", taos_errstr(pRes));
@@ -81,9 +88,9 @@ int32_t create_stream() {
  /*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
  /*const char* sql = "select sum(k) from tu1 interval(10m)";*/
  /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
-  pRes = taos_query(
-      pConn,
-      "create stream stream1 trigger max_delay 10s into outstb as select _wstartts, sum(k) from st1 interval(10m)");
+  pRes = taos_query(pConn,
+                    "create stream stream1 trigger at_once into abc2.outstb as select _wstartts, sum(k) from st1 "
+                    "partition by tbname interval(10m) ");
  if (taos_errno(pRes) != 0) {
    printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
    return -1;
diff --git a/examples/c/tmq.c b/examples/c/tmq.c
index 2e8aa21da7..7870d7d9a1 100644
--- a/examples/c/tmq.c
+++ b/examples/c/tmq.c
@@ -47,7 +47,7 @@ int32_t init_env() {
    return -1;
  }

-  TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 2");
+  TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
  if (taos_errno(pRes) != 0) {
    printf("error in create db, reason:%s\n",
taos_errstr(pRes)); return -1; @@ -146,8 +146,8 @@ int32_t create_topic() { return 0; } -void tmq_commit_cb_print(tmq_t* tmq, tmq_resp_err_t resp, tmq_topic_vgroup_list_t* offsets, void* param) { - printf("commit %d tmq %p offsets %p param %p\n", resp, tmq, offsets, param); +void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { + printf("commit %d tmq %p param %p\n", code, tmq, param); } tmq_t* build_consumer() { @@ -167,7 +167,7 @@ tmq_t* build_consumer() { tmq_conf_set(conf, "td.connect.user", "root"); tmq_conf_set(conf, "td.connect.pass", "taosdata"); tmq_conf_set(conf, "msg.with.table.name", "true"); - tmq_conf_set(conf, "enable.auto.commit", "false"); + tmq_conf_set(conf, "enable.auto.commit", "true"); tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); assert(tmq); @@ -183,10 +183,10 @@ tmq_list_t* build_topic_list() { } void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { - tmq_resp_err_t err; + int32_t code; - if ((err = tmq_subscribe(tmq, topics))) { - fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(err)); + if ((code = tmq_subscribe(tmq, topics))) { + fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code)); printf("subscribe err\n"); return; } @@ -201,12 +201,13 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { taos_free_result(tmqmessage); /*} else {*/ /*break;*/ + /*tmq_commit_sync(tmq, NULL);*/ } } - err = tmq_consumer_close(tmq); - if (err) - fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(err)); + code = tmq_consumer_close(tmq); + if (code) + fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code)); else fprintf(stderr, "%% Consumer closed\n"); } @@ -214,11 +215,11 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) { static const int MIN_COMMIT_COUNT = 1; - int msg_count = 0; - tmq_resp_err_t err; + int msg_count = 0; + int32_t code; - if ((err = tmq_subscribe(tmq, topics))) { - fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(err)); + if ((code = tmq_subscribe(tmq, topics))) { + fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code)); return; } @@ -239,14 +240,14 @@ void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) { msg_process(tmqmessage); taos_free_result(tmqmessage); - /*tmq_commit_async(tmq, NULL, tmq_commit_cb_print, NULL);*/ + /*tmq_commit_sync(tmq, NULL);*/ /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/ } } - err = tmq_consumer_close(tmq); - if (err) - fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(err)); + code = tmq_consumer_close(tmq); + if (code) + fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code)); else fprintf(stderr, "%% Consumer closed\n"); } diff --git a/include/client/taos.h b/include/client/taos.h index deb4276b54..61538e392a 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -126,47 +126,47 @@ typedef struct setConfRet { char retMsg[RET_MSG_LENGTH]; } setConfRet; -DLL_EXPORT void taos_cleanup(void); -DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); -DLL_EXPORT setConfRet taos_set_config(const char *config); -DLL_EXPORT int taos_init(void); -DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); -DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); 
-DLL_EXPORT void taos_close(TAOS *taos); +DLL_EXPORT void taos_cleanup(void); +DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); +DLL_EXPORT setConfRet taos_set_config(const char *config); +DLL_EXPORT int taos_init(void); +DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); +DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); +DLL_EXPORT void taos_close(TAOS *taos); -const char *taos_data_type(int type); +const char *taos_data_type(int type); -DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos); -DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length); -DLL_EXPORT int taos_stmt_set_tbname_tags(TAOS_STMT *stmt, const char *name, TAOS_MULTI_BIND *tags); -DLL_EXPORT int taos_stmt_set_tbname(TAOS_STMT *stmt, const char *name); -DLL_EXPORT int taos_stmt_set_tags(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags); -DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name); -DLL_EXPORT int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields); -DLL_EXPORT int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields); +DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos); +DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length); +DLL_EXPORT int taos_stmt_set_tbname_tags(TAOS_STMT *stmt, const char *name, TAOS_MULTI_BIND *tags); +DLL_EXPORT int taos_stmt_set_tbname(TAOS_STMT *stmt, const char *name); +DLL_EXPORT int taos_stmt_set_tags(TAOS_STMT *stmt, TAOS_MULTI_BIND *tags); +DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT *stmt, const char *name); +DLL_EXPORT int taos_stmt_get_tag_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields); +DLL_EXPORT int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fields); -DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert); -DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums); -DLL_EXPORT int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes); -DLL_EXPORT int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind); -DLL_EXPORT int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind); -DLL_EXPORT int taos_stmt_bind_single_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, int colIdx); -DLL_EXPORT int taos_stmt_add_batch(TAOS_STMT *stmt); -DLL_EXPORT int taos_stmt_execute(TAOS_STMT *stmt); -DLL_EXPORT TAOS_RES *taos_stmt_use_result(TAOS_STMT *stmt); -DLL_EXPORT int taos_stmt_close(TAOS_STMT *stmt); -DLL_EXPORT char *taos_stmt_errstr(TAOS_STMT *stmt); -DLL_EXPORT int taos_stmt_affected_rows(TAOS_STMT *stmt); -DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt); +DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert); +DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums); +DLL_EXPORT int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes); +DLL_EXPORT int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind); +DLL_EXPORT int taos_stmt_bind_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind); +DLL_EXPORT int taos_stmt_bind_single_param_batch(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, int colIdx); +DLL_EXPORT int taos_stmt_add_batch(TAOS_STMT *stmt); +DLL_EXPORT int taos_stmt_execute(TAOS_STMT *stmt); +DLL_EXPORT TAOS_RES *taos_stmt_use_result(TAOS_STMT *stmt); +DLL_EXPORT int taos_stmt_close(TAOS_STMT *stmt); +DLL_EXPORT char *taos_stmt_errstr(TAOS_STMT *stmt); 
+DLL_EXPORT int taos_stmt_affected_rows(TAOS_STMT *stmt); +DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt); -DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql); +DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql); -DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res); -DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision of result -DLL_EXPORT void taos_free_result(TAOS_RES *res); -DLL_EXPORT int taos_field_count(TAOS_RES *res); -DLL_EXPORT int taos_num_fields(TAOS_RES *res); -DLL_EXPORT int taos_affected_rows(TAOS_RES *res); +DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res); +DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision of result +DLL_EXPORT void taos_free_result(TAOS_RES *res); +DLL_EXPORT int taos_field_count(TAOS_RES *res); +DLL_EXPORT int taos_num_fields(TAOS_RES *res); +DLL_EXPORT int taos_affected_rows(TAOS_RES *res); DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); DLL_EXPORT int taos_select_db(TAOS *taos, const char *db); @@ -181,8 +181,8 @@ DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnInde DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); DLL_EXPORT void taos_reset_current_db(TAOS *taos); -DLL_EXPORT int *taos_fetch_lengths(TAOS_RES *res); -DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res); +DLL_EXPORT int *taos_fetch_lengths(TAOS_RES *res); +DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res); DLL_EXPORT const char *taos_get_server_info(TAOS *taos); DLL_EXPORT const char *taos_get_client_info(); @@ -192,8 +192,8 @@ DLL_EXPORT int taos_errno(TAOS_RES *tres); DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param); DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param); -DLL_EXPORT void taos_fetch_raw_block_a(TAOS_RES* res, __taos_async_fn_t fp, void* param); -DLL_EXPORT const void *taos_get_raw_block(TAOS_RES* res); +DLL_EXPORT void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param); +DLL_EXPORT const void *taos_get_raw_block(TAOS_RES *res); // Shuduo: temporary enable for app build #if 1 @@ -209,21 +209,11 @@ DLL_EXPORT TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLi /* --------------------------TMQ INTERFACE------------------------------- */ -enum { - TMQ_RESP_ERR__FAIL = -1, - TMQ_RESP_ERR__SUCCESS = 0, -}; - -typedef int32_t tmq_resp_err_t; - -typedef struct tmq_t tmq_t; -typedef struct tmq_topic_vgroup_t tmq_topic_vgroup_t; -typedef struct tmq_topic_vgroup_list_t tmq_topic_vgroup_list_t; - +typedef struct tmq_t tmq_t; typedef struct tmq_conf_t tmq_conf_t; typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, tmq_resp_err_t, tmq_topic_vgroup_list_t *, void *param)); +typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); DLL_EXPORT tmq_list_t *tmq_list_new(); DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); @@ -233,25 +223,17 @@ DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(tmq_resp_err_t); +DLL_EXPORT const char *tmq_err2str(int32_t code); /* ------------------------TMQ CONSUMER INTERFACE------------------------ */ -DLL_EXPORT tmq_resp_err_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT tmq_resp_err_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT tmq_resp_err_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); - -// timeout: -1 
means infinitely waiting +DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); +DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); +DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); - -DLL_EXPORT tmq_resp_err_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT tmq_resp_err_t tmq_commit_sync(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, tmq_commit_cb *cb, void *param); - -#if 0 -DLL_EXPORT tmq_resp_err_t tmq_commit(tmq_t *tmq, const tmq_topic_vgroup_list_t *offsets, int32_t async); -DLL_EXPORT tmq_resp_err_t tmq_seek(tmq_t *tmq, const tmq_topic_vgroup_t *offset); -#endif +DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); +DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); +DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); /* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */ @@ -275,11 +257,6 @@ DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res); -#if 0 -DLL_EXPORT int64_t tmq_get_request_offset(tmq_message_t *message); -DLL_EXPORT int64_t tmq_get_response_offset(tmq_message_t *message); -#endif - /* ------------------------------ TMQ END -------------------------------- */ #if 1 // Shuduo: temporary enable for app build diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 2e646f4769..a05287761e 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -35,6 +35,7 @@ enum { TMQ_MSG_TYPE__DUMMY = 0, TMQ_MSG_TYPE__POLL_RSP, TMQ_MSG_TYPE__EP_RSP, + TMQ_MSG_TYPE__END_RSP, }; typedef enum EStreamType { diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 1b44b6d7ea..8c03d3ff42 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -61,6 +61,7 @@ extern int32_t tsNumOfRpcThreads; extern int32_t tsNumOfCommitThreads; extern int32_t tsNumOfTaskQueueThreads; extern int32_t tsNumOfMnodeQueryThreads; +extern int32_t tsNumOfMnodeFetchThreads; extern int32_t tsNumOfMnodeReadThreads; extern int32_t tsNumOfVnodeQueryThreads; extern int32_t tsNumOfVnodeFetchThreads; diff --git a/include/common/tmsg.h b/include/common/tmsg.h index a5e4cba204..8a4203891d 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -106,6 +106,7 @@ typedef enum _mgmt_table { TSDB_MGMT_TABLE_CONNS, TSDB_MGMT_TABLE_QUERIES, TSDB_MGMT_TABLE_VNODES, + TSDB_MGMT_TABLE_APPS, TSDB_MGMT_TABLE_MAX, } EShowType; @@ -1314,8 +1315,6 @@ int32_t tSerializeSSetStandbyReq(void* buf, int32_t bufLen, SSetStandbyReq* pReq int32_t tDeserializeSSetStandbyReq(void* buf, int32_t bufLen, SSetStandbyReq* pReq); typedef struct { - int32_t connId; // todo remove - int32_t queryId; // todo remove char queryStrId[TSDB_QUERY_ID_LEN]; } SKillQueryReq; @@ -1323,7 +1322,7 @@ int32_t tSerializeSKillQueryReq(void* buf, int32_t bufLen, SKillQueryReq* pReq); int32_t tDeserializeSKillQueryReq(void* buf, int32_t bufLen, SKillQueryReq* pReq); typedef struct { - int32_t connId; + uint32_t connId; } SKillConnReq; int32_t tSerializeSKillConnReq(void* buf, int32_t bufLen, SKillConnReq* pReq); @@ -2013,7 +2012,6 @@ typedef struct { int64_t useconds; int64_t stime; // timestamp precision ms int64_t reqRid; - int32_t pid; bool stableQuery; char fqdn[TSDB_FQDN_LEN]; int32_t subPlanNum; @@ -2022,8 +2020,6 @@ 
typedef struct { typedef struct { uint32_t connId; - int32_t pid; - char app[TSDB_APP_NAME_LEN]; SArray* queryDesc; // SArray } SQueryHbReqBasic; @@ -2038,9 +2034,31 @@ typedef struct { SArray* pQnodeList; } SQueryHbRspBasic; +typedef struct SAppClusterSummary { + uint64_t numOfInsertsReq; + uint64_t numOfInsertRows; + uint64_t insertElapsedTime; + uint64_t insertBytes; // submit to tsdb since launched. + + uint64_t fetchBytes; + uint64_t queryElapsedTime; + uint64_t numOfSlowQueries; + uint64_t totalRequests; + uint64_t currentRequests; // the number of SRequestObj +} SAppClusterSummary; + +typedef struct { + int64_t appId; + int32_t pid; + char name[TSDB_APP_NAME_LEN]; + int64_t startTime; + SAppClusterSummary summary; +} SAppHbReq; + typedef struct { SClientHbKey connKey; int64_t clusterId; + SAppHbReq app; SQueryHbReqBasic* query; SHashObj* info; // hash } SClientHbReq; diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h index f3600a509e..c0116051af 100644 --- a/include/libs/catalog/catalog.h +++ b/include/libs/catalog/catalog.h @@ -292,6 +292,8 @@ int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, SRequestConnInfo* pConn, uint64_t reqId, bool forceUpdate); +int32_t catalogClearCache(void); + /** * Destroy catalog and relase all resources */ diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index fdedb947d7..083f6ae1b0 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -159,11 +159,14 @@ int64_t qGetQueriedTableUid(qTaskInfo_t tinfo); */ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t tagCondLen, SArray* pTableIdList); - void qProcessFetchRsp(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet); int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes); +int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len); + +int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len); + #ifdef __cplusplus } #endif diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 6f239cc892..08190d978e 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -228,6 +228,7 @@ typedef enum ENodeType { QUERY_NODE_PHYSICAL_PLAN_FILL, QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION, QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, + QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION, QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION, QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE, QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 749d58b224..a9002b5d19 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -130,13 +130,17 @@ typedef struct SMergeLogicNode { typedef enum EWindowType { WINDOW_TYPE_INTERVAL = 1, WINDOW_TYPE_SESSION, WINDOW_TYPE_STATE } EWindowType; -typedef enum EIntervalAlgorithm { +typedef enum EWindowAlgorithm { INTERVAL_ALGO_HASH = 1, INTERVAL_ALGO_MERGE, INTERVAL_ALGO_STREAM_FINAL, INTERVAL_ALGO_STREAM_SEMI, INTERVAL_ALGO_STREAM_SINGLE, -} EIntervalAlgorithm; + SESSION_ALGO_STREAM_SEMI, + SESSION_ALGO_STREAM_FINAL, + SESSION_ALGO_STREAM_SINGLE, + SESSION_ALGO_MERGE, +} EWindowAlgorithm; typedef struct SWindowLogicNode { SLogicNode node; @@ -153,7 +157,7 @@ typedef struct SWindowLogicNode { int8_t triggerType; int64_t watermark; double filesFactor; - EIntervalAlgorithm intervalAlgo; + EWindowAlgorithm windowAlgo; } 
SWindowLogicNode; typedef struct SFillLogicNode { @@ -371,6 +375,8 @@ typedef struct SSessionWinodwPhysiNode { } SSessionWinodwPhysiNode; typedef SSessionWinodwPhysiNode SStreamSessionWinodwPhysiNode; +typedef SSessionWinodwPhysiNode SStreamSemiSessionWinodwPhysiNode; +typedef SSessionWinodwPhysiNode SStreamFinalSessionWinodwPhysiNode; typedef struct SStateWinodwPhysiNode { SWinodwPhysiNode window; diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h index 89eb21556a..b6a3a08f7e 100644 --- a/include/libs/qcom/query.h +++ b/include/libs/qcom/query.h @@ -33,8 +33,6 @@ typedef enum { JOB_TASK_STATUS_PARTIAL_SUCCEED, JOB_TASK_STATUS_SUCCEED, JOB_TASK_STATUS_FAILED, - JOB_TASK_STATUS_CANCELLING, - JOB_TASK_STATUS_CANCELLED, JOB_TASK_STATUS_DROPPING, } EJobTaskType; diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h index 8087a6d33c..ecb21335b9 100644 --- a/include/libs/scheduler/scheduler.h +++ b/include/libs/scheduler/scheduler.h @@ -73,13 +73,14 @@ typedef void (*schedulerExecCallback)(SQueryResult* pResult, void* param, int32_ typedef void (*schedulerFetchCallback)(void* pResult, void* param, int32_t code); typedef struct SSchedulerReq { - SRequestConnInfo *pConn; - SArray *pNodeList; - SQueryPlan *pDag; - const char *sql; - int64_t startTs; + bool *reqKilled; + SRequestConnInfo *pConn; + SArray *pNodeList; + SQueryPlan *pDag; + const char *sql; + int64_t startTs; schedulerExecCallback fp; - void* cbParam; + void* cbParam; } SSchedulerReq; @@ -127,7 +128,7 @@ void schedulerStopQueryHb(void *pTrans); * Free the query job * @param pJob */ -void schedulerFreeJob(int64_t job); +void schedulerFreeJob(int64_t job, int32_t errCode); void schedulerDestroy(void); diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index f49030466e..e8c4faa240 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -62,6 +62,7 @@ typedef struct SSyncCfg { typedef struct SFsmCbMeta { SyncIndex index; + SyncIndex lastConfigIndex; bool isWeak; int32_t code; ESyncState state; @@ -75,6 +76,7 @@ typedef struct SReConfigCbMeta { int32_t code; SyncIndex index; SyncTerm term; + SyncIndex lastConfigIndex; SyncTerm currentTerm; SSyncCfg oldCfg; SSyncCfg newCfg; diff --git a/include/os/osTime.h b/include/os/osTime.h index b9e407cbcf..949c15ed0d 100644 --- a/include/os/osTime.h +++ b/include/os/osTime.h @@ -23,22 +23,22 @@ extern "C" { // If the error is in a third-party library, place this header file under the third-party library header file. // When you want to use this feature, you should find or add the same function in the following section. 
#ifndef ALLOW_FORBID_FUNC - #define strptime STRPTIME_FUNC_TAOS_FORBID - #define gettimeofday GETTIMEOFDAY_FUNC_TAOS_FORBID - #define localtime LOCALTIME_FUNC_TAOS_FORBID - #define localtime_s LOCALTIMES_FUNC_TAOS_FORBID - #define localtime_r LOCALTIMER_FUNC_TAOS_FORBID - #define time TIME_FUNC_TAOS_FORBID - #define mktime MKTIME_FUNC_TAOS_FORBID +#define strptime STRPTIME_FUNC_TAOS_FORBID +#define gettimeofday GETTIMEOFDAY_FUNC_TAOS_FORBID +#define localtime LOCALTIME_FUNC_TAOS_FORBID +#define localtime_s LOCALTIMES_FUNC_TAOS_FORBID +#define localtime_r LOCALTIMER_FUNC_TAOS_FORBID +#define time TIME_FUNC_TAOS_FORBID +#define mktime MKTIME_FUNC_TAOS_FORBID #endif #ifdef WINDOWS - #define CLOCK_REALTIME 0 +#define CLOCK_REALTIME 0 - #define MILLISECOND_PER_SECOND (1000i64) +#define MILLISECOND_PER_SECOND (1000i64) #else - #define MILLISECOND_PER_SECOND ((int64_t)1000L) +#define MILLISECOND_PER_SECOND ((int64_t)1000L) #endif #define MILLISECOND_PER_MINUTE (MILLISECOND_PER_SECOND * 60) @@ -82,13 +82,13 @@ static FORCE_INLINE int64_t taosGetTimestampNs() { return (int64_t)systemTime.tv_sec * 1000000000L + (int64_t)systemTime.tv_nsec; } -char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm); +char * taosStrpTime(const char *buf, const char *fmt, struct tm *tm); struct tm *taosLocalTime(const time_t *timep, struct tm *result); -time_t taosTime(time_t *t); -time_t taosMktime(struct tm *timep); +time_t taosTime(time_t *t); +time_t taosMktime(struct tm *timep); #ifdef __cplusplus } #endif -#endif /*_TD_OS_TIME_H_*/ +#endif /*_TD_OS_TIME_H_*/ diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 73366955e4..bd9d8d274e 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -127,6 +127,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TSC_STMT_API_ERROR TAOS_DEF_ERROR_CODE(0, 0X0225) #define TSDB_CODE_TSC_STMT_TBNAME_ERROR TAOS_DEF_ERROR_CODE(0, 0X0226) #define TSDB_CODE_TSC_STMT_CLAUSE_ERROR TAOS_DEF_ERROR_CODE(0, 0X0227) +#define TSDB_CODE_TSC_QUERY_KILLED TAOS_DEF_ERROR_CODE(0, 0X0228) // mnode-common #define TSDB_CODE_MND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0300) @@ -429,7 +430,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_TQ_META_KEY_NOT_IN_TXN TAOS_DEF_ERROR_CODE(0, 0x0A09) #define TSDB_CODE_TQ_META_KEY_DUP_IN_TXN TAOS_DEF_ERROR_CODE(0, 0x0A0A) #define TSDB_CODE_TQ_GROUP_NOT_SET TAOS_DEF_ERROR_CODE(0, 0x0A0B) -#define TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0A0B) +#define TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0A0C) +#define TSDB_CODE_TQ_NO_COMMITTED_OFFSET TAOS_DEF_ERROR_CODE(0, 0x0A0D) // wal #define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) @@ -439,108 +441,6 @@ int32_t* taosGetErrno(); #define TSDB_CODE_WAL_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1004) #define TSDB_CODE_WAL_LOG_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x1005) -// http -#define TSDB_CODE_HTTP_SERVER_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x1100) //"http server is not online" -#define TSDB_CODE_HTTP_UNSUPPORT_URL TAOS_DEF_ERROR_CODE(0, 0x1101) //"url is not support" -#define TSDB_CODE_HTTP_INVALID_URL TAOS_DEF_ERROR_CODE(0, 0x1102) //invalid url format" -#define TSDB_CODE_HTTP_NO_ENOUGH_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1103) //"no enough memory" -#define TSDB_CODE_HTTP_REQUSET_TOO_BIG TAOS_DEF_ERROR_CODE(0, 0x1104) //"request size is too big" -#define TSDB_CODE_HTTP_NO_AUTH_INFO TAOS_DEF_ERROR_CODE(0, 0x1105) //"no auth info input" -#define TSDB_CODE_HTTP_NO_MSG_INPUT TAOS_DEF_ERROR_CODE(0, 0x1106) //"request is empty" -#define 
TSDB_CODE_HTTP_NO_SQL_INPUT TAOS_DEF_ERROR_CODE(0, 0x1107) //"no sql input" -#define TSDB_CODE_HTTP_NO_EXEC_USEDB TAOS_DEF_ERROR_CODE(0, 0x1108) //"no need to execute use db cmd" -#define TSDB_CODE_HTTP_SESSION_FULL TAOS_DEF_ERROR_CODE(0, 0x1109) //"session list was full" -#define TSDB_CODE_HTTP_GEN_TAOSD_TOKEN_ERR TAOS_DEF_ERROR_CODE(0, 0x110A) //"generate taosd token error" -#define TSDB_CODE_HTTP_INVALID_MULTI_REQUEST TAOS_DEF_ERROR_CODE(0, 0x110B) //"size of multi request is 0" -#define TSDB_CODE_HTTP_CREATE_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110C) //"failed to create gzip" -#define TSDB_CODE_HTTP_FINISH_GZIP_FAILED TAOS_DEF_ERROR_CODE(0, 0x110D) //"failed to finish gzip" -#define TSDB_CODE_HTTP_LOGIN_FAILED TAOS_DEF_ERROR_CODE(0, 0x110E) //"failed to login" - -#define TSDB_CODE_HTTP_INVALID_VERSION TAOS_DEF_ERROR_CODE(0, 0x1120) //"invalid http version" -#define TSDB_CODE_HTTP_INVALID_CONTENT_LENGTH TAOS_DEF_ERROR_CODE(0, 0x1121) //"invalid content length" -#define TSDB_CODE_HTTP_INVALID_AUTH_TYPE TAOS_DEF_ERROR_CODE(0, 0x1122) //"invalid type of Authorization" -#define TSDB_CODE_HTTP_INVALID_AUTH_FORMAT TAOS_DEF_ERROR_CODE(0, 0x1123) //"invalid format of Authorization" -#define TSDB_CODE_HTTP_INVALID_BASIC_AUTH TAOS_DEF_ERROR_CODE(0, 0x1124) //"invalid basic Authorization" -#define TSDB_CODE_HTTP_INVALID_TAOSD_AUTH TAOS_DEF_ERROR_CODE(0, 0x1125) //"invalid taosd Authorization" -#define TSDB_CODE_HTTP_PARSE_METHOD_FAILED TAOS_DEF_ERROR_CODE(0, 0x1126) //"failed to parse method" -#define TSDB_CODE_HTTP_PARSE_TARGET_FAILED TAOS_DEF_ERROR_CODE(0, 0x1127) //"failed to parse target" -#define TSDB_CODE_HTTP_PARSE_VERSION_FAILED TAOS_DEF_ERROR_CODE(0, 0x1128) //"failed to parse http version" -#define TSDB_CODE_HTTP_PARSE_SP_FAILED TAOS_DEF_ERROR_CODE(0, 0x1129) //"failed to parse sp" -#define TSDB_CODE_HTTP_PARSE_STATUS_FAILED TAOS_DEF_ERROR_CODE(0, 0x112A) //"failed to parse status" -#define TSDB_CODE_HTTP_PARSE_PHRASE_FAILED TAOS_DEF_ERROR_CODE(0, 0x112B) //"failed to parse phrase" -#define TSDB_CODE_HTTP_PARSE_CRLF_FAILED TAOS_DEF_ERROR_CODE(0, 0x112C) //"failed to parse crlf" -#define TSDB_CODE_HTTP_PARSE_HEADER_FAILED TAOS_DEF_ERROR_CODE(0, 0x112D) //"failed to parse header" -#define TSDB_CODE_HTTP_PARSE_HEADER_KEY_FAILED TAOS_DEF_ERROR_CODE(0, 0x112E) //"failed to parse header key" -#define TSDB_CODE_HTTP_PARSE_HEADER_VAL_FAILED TAOS_DEF_ERROR_CODE(0, 0x112F) //"failed to parse header val" -#define TSDB_CODE_HTTP_PARSE_CHUNK_SIZE_FAILED TAOS_DEF_ERROR_CODE(0, 0x1130) //"failed to parse chunk size" -#define TSDB_CODE_HTTP_PARSE_CHUNK_FAILED TAOS_DEF_ERROR_CODE(0, 0x1131) //"failed to parse chunk" -#define TSDB_CODE_HTTP_PARSE_END_FAILED TAOS_DEF_ERROR_CODE(0, 0x1132) //"failed to parse end section" -#define TSDB_CODE_HTTP_PARSE_INVALID_STATE TAOS_DEF_ERROR_CODE(0, 0x1134) //"invalid parse state" -#define TSDB_CODE_HTTP_PARSE_ERROR_STATE TAOS_DEF_ERROR_CODE(0, 0x1135) //"failed to parse error section" - -#define TSDB_CODE_HTTP_GC_QUERY_NULL TAOS_DEF_ERROR_CODE(0, 0x1150) //"query size is 0" -#define TSDB_CODE_HTTP_GC_QUERY_SIZE TAOS_DEF_ERROR_CODE(0, 0x1151) //"query size can not more than 100" -#define TSDB_CODE_HTTP_GC_REQ_PARSE_ERROR TAOS_DEF_ERROR_CODE(0, 0x1152) //"parse grafana json error" - -#define TSDB_CODE_HTTP_TG_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1160) //"database name can not be null" -#define TSDB_CODE_HTTP_TG_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1161) //"database name too long" -#define TSDB_CODE_HTTP_TG_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1162) //"invalid telegraf 
json fromat" -#define TSDB_CODE_HTTP_TG_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1163) //"metrics size is 0" -#define TSDB_CODE_HTTP_TG_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1164) //"metrics size can not more than 1K" -#define TSDB_CODE_HTTP_TG_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1165) //"metric name not find" -#define TSDB_CODE_HTTP_TG_METRIC_TYPE TAOS_DEF_ERROR_CODE(0, 0x1166) //"metric name type should be string" -#define TSDB_CODE_HTTP_TG_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1167) //"metric name length is 0" -#define TSDB_CODE_HTTP_TG_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1168) //"metric name length too long" -#define TSDB_CODE_HTTP_TG_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1169) //"timestamp not find" -#define TSDB_CODE_HTTP_TG_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x116A) //"timestamp type should be integer" -#define TSDB_CODE_HTTP_TG_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x116B) //"timestamp value smaller than 0" -#define TSDB_CODE_HTTP_TG_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x116C) //"tags not find" -#define TSDB_CODE_HTTP_TG_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x116D) //"tags size is 0" -#define TSDB_CODE_HTTP_TG_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x116E) //"tags size too long" -#define TSDB_CODE_HTTP_TG_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x116F) //"tag is null" -#define TSDB_CODE_HTTP_TG_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1170) //"tag name is null" -#define TSDB_CODE_HTTP_TG_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x1171) //"tag name length too long" -#define TSDB_CODE_HTTP_TG_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x1172) //"tag value type should be number or string" -#define TSDB_CODE_HTTP_TG_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x1173) //"tag value is null" -#define TSDB_CODE_HTTP_TG_TABLE_NULL TAOS_DEF_ERROR_CODE(0, 0x1174) //"table is null" -#define TSDB_CODE_HTTP_TG_TABLE_SIZE TAOS_DEF_ERROR_CODE(0, 0x1175) //"table name length too long" -#define TSDB_CODE_HTTP_TG_FIELDS_NULL TAOS_DEF_ERROR_CODE(0, 0x1176) //"fields not find" -#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x1177) //"fields size is 0" -#define TSDB_CODE_HTTP_TG_FIELDS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x1178) //"fields size too long" -#define TSDB_CODE_HTTP_TG_FIELD_NULL TAOS_DEF_ERROR_CODE(0, 0x1179) //"field is null" -#define TSDB_CODE_HTTP_TG_FIELD_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x117A) //"field name is null" -#define TSDB_CODE_HTTP_TG_FIELD_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x117B) //"field name length too long" -#define TSDB_CODE_HTTP_TG_FIELD_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x117C) //"field value type should be number or string" -#define TSDB_CODE_HTTP_TG_FIELD_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x117D) //"field value is null" -#define TSDB_CODE_HTTP_TG_HOST_NOT_STRING TAOS_DEF_ERROR_CODE(0, 0x117E) //"host type should be string" -#define TSDB_CODE_HTTP_TG_STABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x117F) //"stable not exist" - -#define TSDB_CODE_HTTP_OP_DB_NOT_INPUT TAOS_DEF_ERROR_CODE(0, 0x1190) //"database name can not be null" -#define TSDB_CODE_HTTP_OP_DB_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x1191) //"database name too long" -#define TSDB_CODE_HTTP_OP_INVALID_JSON TAOS_DEF_ERROR_CODE(0, 0x1192) //"invalid opentsdb json fromat" -#define TSDB_CODE_HTTP_OP_METRICS_NULL TAOS_DEF_ERROR_CODE(0, 0x1193) //"metrics size is 0" -#define TSDB_CODE_HTTP_OP_METRICS_SIZE TAOS_DEF_ERROR_CODE(0, 0x1194) //"metrics size can not more than 10K" -#define TSDB_CODE_HTTP_OP_METRIC_NULL TAOS_DEF_ERROR_CODE(0, 0x1195) //"metric name not find" -#define TSDB_CODE_HTTP_OP_METRIC_TYPE 
TAOS_DEF_ERROR_CODE(0, 0x1196) //"metric name type should be string" -#define TSDB_CODE_HTTP_OP_METRIC_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x1197) //"metric name length is 0" -#define TSDB_CODE_HTTP_OP_METRIC_NAME_LONG TAOS_DEF_ERROR_CODE(0, 0x1198) //"metric name length can not more than 22" -#define TSDB_CODE_HTTP_OP_TIMESTAMP_NULL TAOS_DEF_ERROR_CODE(0, 0x1199) //"timestamp not find" -#define TSDB_CODE_HTTP_OP_TIMESTAMP_TYPE TAOS_DEF_ERROR_CODE(0, 0x119A) //"timestamp type should be integer" -#define TSDB_CODE_HTTP_OP_TIMESTAMP_VAL_NULL TAOS_DEF_ERROR_CODE(0, 0x119B) //"timestamp value smaller than 0" -#define TSDB_CODE_HTTP_OP_TAGS_NULL TAOS_DEF_ERROR_CODE(0, 0x119C) //"tags not find" -#define TSDB_CODE_HTTP_OP_TAGS_SIZE_0 TAOS_DEF_ERROR_CODE(0, 0x119D) //"tags size is 0" -#define TSDB_CODE_HTTP_OP_TAGS_SIZE_LONG TAOS_DEF_ERROR_CODE(0, 0x119E) //"tags size too long" -#define TSDB_CODE_HTTP_OP_TAG_NULL TAOS_DEF_ERROR_CODE(0, 0x119F) //"tag is null" -#define TSDB_CODE_HTTP_OP_TAG_NAME_NULL TAOS_DEF_ERROR_CODE(0, 0x11A0) //"tag name is null" -#define TSDB_CODE_HTTP_OP_TAG_NAME_SIZE TAOS_DEF_ERROR_CODE(0, 0x11A1) //"tag name length too long" -#define TSDB_CODE_HTTP_OP_TAG_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A2) //"tag value type should be boolean number or string" -#define TSDB_CODE_HTTP_OP_TAG_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A3) //"tag value is null" -#define TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x11A4) //"tag value can not more than 64" -#define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find" -#define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string" - -#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error" - // tfs #define TSDB_CODE_FS_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x2200) #define TSDB_CODE_FS_INVLD_CFG TAOS_DEF_ERROR_CODE(0, 0x2201) @@ -570,6 +470,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_SCH_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2502) #define TSDB_CODE_SCH_IGNORE_ERROR TAOS_DEF_ERROR_CODE(0, 0x2503) #define TSDB_CODE_SCH_TIMEOUT_ERROR TAOS_DEF_ERROR_CODE(0, 0x2504) +#define TSDB_CODE_SCH_JOB_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x2505) #define TSDB_CODE_QW_MSG_ERROR TAOS_DEF_ERROR_CODE(0, 0x2550) //parser @@ -709,6 +610,8 @@ int32_t* taosGetErrno(); //index #define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200) +//tmq +#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000) #ifdef __cplusplus } diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 69a24e20cd..d61262c9dc 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -76,10 +76,12 @@ typedef int32_t (*FHbReqHandle)(SClientHbKey* connKey, void* param, SClientHbReq typedef struct { int8_t inited; + int64_t appId; // ctl int8_t threadStop; TdThread thread; TdThreadMutex lock; // used when app init and cleanup + SHashObj *appSummary; SArray* appHbMgrs; // SArray one for each cluster FHbReqHandle reqHandle[CONN_TYPE__MAX]; FHbRspHandle rspHandle[CONN_TYPE__MAX]; @@ -92,33 +94,20 @@ typedef struct SQueryExecMetric { int64_t rsp; // receive response from server, us } SQueryExecMetric; -typedef struct SInstanceSummary { - uint64_t numOfInsertsReq; - uint64_t numOfInsertRows; - uint64_t insertElapsedTime; - uint64_t insertBytes; // submit to tsdb since launched. 
- - uint64_t fetchBytes; - uint64_t queryElapsedTime; - uint64_t numOfSlowQueries; - uint64_t totalRequests; - uint64_t currentRequests; // the number of SRequestObj -} SInstanceSummary; - typedef struct SHeartBeatInfo { void* pTimer; // timer, used to send request msg to mnode } SHeartBeatInfo; struct SAppInstInfo { - int64_t numOfConns; - SCorEpSet mgmtEp; - TdThreadMutex qnodeMutex; - SArray* pQnodeList; - SInstanceSummary summary; - SList* pConnList; // STscObj linked list - uint64_t clusterId; - void* pTransporter; - SAppHbMgr* pAppHbMgr; + int64_t numOfConns; + SCorEpSet mgmtEp; + TdThreadMutex qnodeMutex; + SArray* pQnodeList; + SAppClusterSummary summary; + SList* pConnList; // STscObj linked list + uint64_t clusterId; + void* pTransporter; + SAppHbMgr* pAppHbMgr; }; typedef struct SAppInfo { @@ -215,6 +204,7 @@ typedef struct SRequestObj { SRequestSendRecvBody body; bool stableQuery; + bool killed; uint32_t prevCode; //previous error code: todo refactor, add update flag for catalog uint32_t retry; } SRequestObj; diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index bb60624145..24246e5c45 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -48,7 +48,7 @@ static void registerRequest(SRequestObj *pRequest) { int32_t num = atomic_add_fetch_32(&pTscObj->numOfReqs, 1); if (pTscObj->pAppInfo) { - SInstanceSummary *pSummary = &pTscObj->pAppInfo->summary; + SAppClusterSummary *pSummary = &pTscObj->pAppInfo->summary; int32_t total = atomic_add_fetch_64((int64_t *)&pSummary->totalRequests, 1); int32_t currentInst = atomic_add_fetch_64((int64_t *)&pSummary->currentRequests, 1); @@ -62,7 +62,7 @@ static void deregisterRequest(SRequestObj *pRequest) { assert(pRequest != NULL); STscObj *pTscObj = pRequest->pTscObj; - SInstanceSummary *pActivity = &pTscObj->pAppInfo->summary; + SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary; int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1); int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1); @@ -229,7 +229,7 @@ static void doDestroyRequest(void *p) { taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self)); if (pRequest->body.queryJob != 0) { - schedulerFreeJob(pRequest->body.queryJob); + schedulerFreeJob(pRequest->body.queryJob, 0); } taosMemoryFreeClear(pRequest->msgBuf); diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index e0b8f322bf..c6d0d6a860 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -164,6 +164,7 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) { pTscObj->connId = pRsp->query->connId; if (pRsp->query->killRid) { + tscDebug("request rid %" PRIx64 " need to be killed now", pRsp->query->killRid); SRequestObj *pRequest = acquireRequest(pRsp->query->killRid); if (NULL == pRequest) { tscDebug("request 0x%" PRIx64 " not exist to kill", pRsp->query->killRid); @@ -304,7 +305,7 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) { while (pIter != NULL) { int64_t *rid = pIter; SRequestObj *pRequest = acquireRequest(*rid); - if (NULL == pRequest) { + if (NULL == pRequest || pRequest->killed) { pIter = taosHashIterate(pObj->pRequests, pIter); continue; } @@ -314,7 +315,6 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) { desc.queryId = pRequest->requestId; desc.useconds = now - pRequest->metric.start; desc.reqRid = pRequest->self; - desc.pid = hbBasic->pid; desc.stableQuery = pRequest->stableQuery; 
taosGetFqdn(desc.fqdn); desc.subPlanNum = pRequest->body.pDag ? pRequest->body.pDag->numOfSubplans : 0; @@ -360,8 +360,6 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) { } hbBasic->connId = pTscObj->connId; - hbBasic->pid = taosGetPId(); - taosGetAppName(hbBasic->app, NULL); int32_t numOfQueries = pTscObj->pRequests ? taosHashGetSize(pTscObj->pRequests) : 0; if (numOfQueries <= 0) { @@ -507,6 +505,21 @@ int32_t hbGetExpiredStbInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SC return TSDB_CODE_SUCCESS; } +int32_t hbGetAppInfo(int64_t clusterId, SClientHbReq *req) { + SAppHbReq* pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId)); + if (NULL != pApp) { + memcpy(&req->app, pApp, sizeof(*pApp)); + } else { + memset(&req->app.summary, 0, sizeof(req->app.summary)); + req->app.pid = taosGetPId(); + req->app.appId = clientHbMgr.appId; + taosGetAppName(req->app.name, NULL); + } + + return TSDB_CODE_SUCCESS; +} + + int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req) { int64_t *clusterId = (int64_t *)param; struct SCatalog *pCatalog = NULL; @@ -517,6 +530,8 @@ int32_t hbQueryHbReqHandle(SClientHbKey *connKey, void *param, SClientHbReq *req return code; } + hbGetAppInfo(*clusterId, req); + hbGetQueryBasicInfo(connKey, req); code = hbGetExpiredUserInfo(connKey, pCatalog, req); @@ -589,6 +604,50 @@ void hbThreadFuncUnexpectedStopped(void) { atomic_store_8(&clientHbMgr.threadStop, 2); } +void hbMergeSummary(SAppClusterSummary* dst, SAppClusterSummary* src) { + dst->numOfInsertsReq += src->numOfInsertsReq; + dst->numOfInsertRows += src->numOfInsertRows; + dst->insertElapsedTime += src->insertElapsedTime; + dst->insertBytes += src->insertBytes; + dst->fetchBytes += src->fetchBytes; + dst->queryElapsedTime += src->queryElapsedTime; + dst->numOfSlowQueries += src->numOfSlowQueries; + dst->totalRequests += src->totalRequests; + dst->currentRequests += src->currentRequests; +} + +int32_t hbGatherAppInfo(void) { + SAppHbReq req = {0}; + int sz = taosArrayGetSize(clientHbMgr.appHbMgrs); + if (sz > 0) { + req.pid = taosGetPId(); + req.appId = clientHbMgr.appId; + taosGetAppName(req.name, NULL); + } + + taosHashClear(clientHbMgr.appSummary); + + for (int32_t i = 0; i < sz; ++i) { + SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i); + uint64_t clusterId = pAppHbMgr->pAppInstInfo->clusterId; + SAppHbReq* pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId)); + if (NULL == pApp) { + memcpy(&req.summary, &pAppHbMgr->pAppInstInfo->summary, sizeof(req.summary)); + req.startTime = pAppHbMgr->startTime; + taosHashPut(clientHbMgr.appSummary, &clusterId, sizeof(clusterId), &req, sizeof(req)); + } else { + if (pAppHbMgr->startTime < pApp->startTime) { + pApp->startTime = pAppHbMgr->startTime; + } + + hbMergeSummary(&pApp->summary, &pAppHbMgr->pAppInstInfo->summary); + } + } + + return TSDB_CODE_SUCCESS; +} + + static void *hbThreadFunc(void *param) { setThreadName("hb"); #ifdef WINDOWS @@ -605,6 +664,10 @@ static void *hbThreadFunc(void *param) { taosThreadMutexLock(&clientHbMgr.lock); int sz = taosArrayGetSize(clientHbMgr.appHbMgrs); + if (sz > 0) { + hbGatherAppInfo(); + } + for (int i = 0; i < sz; i++) { SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i); @@ -748,6 +811,10 @@ int hbMgrInit() { int8_t old = atomic_val_compare_exchange_8(&clientHbMgr.inited, 0, 1); if (old == 1) return 0; + clientHbMgr.appId = tGenIdPI64(); + tscDebug("app %" PRIx64 " initialized", clientHbMgr.appId); + + 
clientHbMgr.appSummary = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK); clientHbMgr.appHbMgrs = taosArrayInit(0, sizeof(void *)); taosThreadMutexInit(&clientHbMgr.lock, NULL); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 10c2161478..d04cc90ee4 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -418,7 +418,7 @@ int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNod while (true) { if (code != TSDB_CODE_SUCCESS) { if (pRequest->body.queryJob != 0) { - schedulerFreeJob(pRequest->body.queryJob); + schedulerFreeJob(pRequest->body.queryJob, 0); } pRequest->code = code; @@ -439,7 +439,7 @@ int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNod pRequest->body.resInfo.numOfRows = res.numOfRows; if (pRequest->body.queryJob != 0) { - schedulerFreeJob(pRequest->body.queryJob); + schedulerFreeJob(pRequest->body.queryJob, 0); } } @@ -461,14 +461,15 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList .sql = pRequest->sqlstr, .startTs = pRequest->metric.start, .fp = NULL, - .cbParam = NULL}; + .cbParam = NULL, + .reqKilled = &pRequest->killed}; int32_t code = schedulerExecJob(&req, &pRequest->body.queryJob, &res); pRequest->body.resInfo.execRes = res.res; if (code != TSDB_CODE_SUCCESS) { if (pRequest->body.queryJob != 0) { - schedulerFreeJob(pRequest->body.queryJob); + schedulerFreeJob(pRequest->body.queryJob, 0); } pRequest->code = code; @@ -481,7 +482,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList pRequest->body.resInfo.numOfRows = res.numOfRows; if (pRequest->body.queryJob != 0) { - schedulerFreeJob(pRequest->body.queryJob); + schedulerFreeJob(pRequest->body.queryJob, 0); } } @@ -608,6 +609,9 @@ void schedulerExecCb(SQueryResult* pResult, void* param, int32_t code) { SRequestObj* pRequest = (SRequestObj*)param; pRequest->code = code; + tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64, + pRequest->self, code, tstrerror(code), pRequest->requestId); + STscObj* pTscObj = pRequest->pTscObj; if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code)) { tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64, @@ -738,7 +742,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery) { .sql = pRequest->sqlstr, .startTs = pRequest->metric.start, .fp = schedulerExecCb, - .cbParam = pRequest}; + .cbParam = pRequest, + .reqKilled = &pRequest->killed}; code = schedulerAsyncExecJob(&req, &pRequest->body.queryJob); } else { tscError("0x%" PRIx64 " failed to create query plan, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code), diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 13a5acb472..9a5d5bbeca 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -246,13 +246,14 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { if (TD_RES_QUERY(res)) { SRequestObj *pRequest = (SRequestObj *)res; + if (pRequest->type == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pRequest->type == TSDB_SQL_INSERT || + pRequest->code != TSDB_CODE_SUCCESS || taos_num_fields(res) == 0 || pRequest->killed) { + return NULL; + } + #if SYNC_ON_TOP_OF_ASYNC return doAsyncFetchRows(pRequest, true, true); #else - if (pRequest->type == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pRequest->type == TSDB_SQL_INSERT || - pRequest->code != TSDB_CODE_SUCCESS || taos_num_fields(res) == 0) { - return NULL; 
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 13a5acb472..9a5d5bbeca 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -246,13 +246,14 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
   if (TD_RES_QUERY(res)) {
     SRequestObj *pRequest = (SRequestObj *)res;
+    if (pRequest->type == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pRequest->type == TSDB_SQL_INSERT ||
+        pRequest->code != TSDB_CODE_SUCCESS || taos_num_fields(res) == 0 || pRequest->killed) {
+      return NULL;
+    }
+
 #if SYNC_ON_TOP_OF_ASYNC
     return doAsyncFetchRows(pRequest, true, true);
 #else
-    if (pRequest->type == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pRequest->type == TSDB_SQL_INSERT ||
-        pRequest->code != TSDB_CODE_SUCCESS || taos_num_fields(res) == 0) {
-      return NULL;
-    }
     return doFetchRows(pRequest, true, true);
 #endif
@@ -482,14 +483,20 @@ void taos_stop_query(TAOS_RES *res) {
   }

   SRequestObj *pRequest = (SRequestObj *)res;
+  pRequest->killed = true;
+
   int32_t numOfFields = taos_num_fields(pRequest);
-  // It is not a query, no need to stop.
   if (numOfFields == 0) {
+    tscDebug("request %" PRIx64 " no need to be killed since not query", pRequest->requestId);
     return;
   }

-  schedulerFreeJob(pRequest->body.queryJob);
+  if (pRequest->body.queryJob) {
+    schedulerFreeJob(pRequest->body.queryJob, TSDB_CODE_TSC_QUERY_KILLED);
+  }
+
+  tscDebug("request %" PRIx64 " killed", pRequest->requestId);
 }

 bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
@@ -830,6 +837,9 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
   SReqResultInfo *pResultInfo = &pRequest->body.resInfo;

+  tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64,
+           pRequest->self, code, tstrerror(code), pRequest->requestId);
+
   pResultInfo->pData = pResult;
   pResultInfo->numOfRows = 0;
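From the application side, the `taos_stop_query()` rework above means stopping a query is now a flag-set plus a job free with an explicit kill code, and a subsequent fetch returns NULL. A hedged usage sketch against the public C API; the connection parameters and table name are assumptions:

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
  // Assumed local server with default credentials.
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) return 1;

  TAOS_RES *res = taos_query(conn, "select * from test.meters");

  // Typically called from another thread while a fetch loop runs.
  taos_stop_query(res);

  // With pRequest->killed set, fetching now bails out early with NULL.
  TAOS_ROW row = taos_fetch_row(res);
  printf("row after stop: %p\n", (void *)row);

  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```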
diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c
index dd900e6045..638b4f1ea5 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/tmq.c
@@ -48,14 +48,6 @@ struct tmq_list_t {
   SArray container;
 };

-struct tmq_topic_vgroup_t {
-  SMqOffset offset;
-};
-
-struct tmq_topic_vgroup_list_t {
-  SArray container;  // SArray
-};
-
 struct tmq_conf_t {
   char clientId[256];
   char groupId[TSDB_CGROUP_LEN];
@@ -161,9 +153,9 @@ typedef struct {
 } SMqPollRspWrapper;

 typedef struct {
-  tmq_t*         tmq;
-  tsem_t         rspSem;
-  tmq_resp_err_t rspErr;
+  tmq_t*  tmq;
+  tsem_t  rspSem;
+  int32_t rspErr;
 } SMqSubscribeCbParam;

 typedef struct {
@@ -189,7 +181,7 @@ typedef struct {
   int8_t         freeOffsets;
   tmq_commit_cb* userCb;
   tsem_t         rspSem;
-  tmq_resp_err_t rspErr;
+  int32_t        rspErr;
   SArray*        offsets;
   void*          userParam;
 } SMqCommitCbParam;

@@ -201,12 +193,12 @@ typedef struct {
   int8_t         freeOffsets;
   int32_t        waitingRspNum;
   int32_t        totalRspNum;
-  tmq_resp_err_t rspErr;
+  int32_t        rspErr;
   tmq_commit_cb* userCb;
-  SArray*        successfulOffsets;
-  SArray*        failedOffsets;
-  void*          userParam;
-  tsem_t         rspSem;
+  /*SArray* successfulOffsets;*/
+  /*SArray* failedOffsets;*/
+  void*  userParam;
+  tsem_t rspSem;
 } SMqCommitCbParamSet;

 typedef struct {
@@ -347,10 +339,9 @@ int32_t tmqCommitCb(void* param, const SDataBuf* pMsg, int32_t code) {
   pParam->rspErr = code;
   if (pParam->async) {
     if (pParam->automatic && pParam->tmq->commitCb) {
-      pParam->tmq->commitCb(pParam->tmq, pParam->rspErr, (tmq_topic_vgroup_list_t*)pParam->offsets,
-                            pParam->tmq->commitCbUserParam);
+      pParam->tmq->commitCb(pParam->tmq, pParam->rspErr, pParam->tmq->commitCbUserParam);
     } else if (!pParam->automatic && pParam->userCb) {
-      pParam->userCb(pParam->tmq, pParam->rspErr, (tmq_topic_vgroup_list_t*)pParam->offsets, pParam->userParam);
+      pParam->userCb(pParam->tmq, pParam->rspErr, pParam->userParam);
     }

     if (pParam->freeOffsets) {
@@ -368,11 +359,16 @@ int32_t tmqCommitCb2(void* param, const SDataBuf* pBuf, int32_t code) {
   SMqCommitCbParam2*   pParam = (SMqCommitCbParam2*)param;
   SMqCommitCbParamSet* pParamSet = (SMqCommitCbParamSet*)pParam->params;
   // push into array
+#if 0
   if (code == 0) {
     taosArrayPush(pParamSet->successfulOffsets, &pParam->pOffset);
   } else {
     taosArrayPush(pParamSet->failedOffsets, &pParam->pOffset);
   }
+#endif
+
+  /*tscDebug("receive offset commit cb of %s on vg %d, offset is %ld", pParam->pOffset->subKey,
+   * pParam->pOffset->vgId, pParam->pOffset->version);*/

   // count down waiting rsp
   int32_t waitingRspNum = atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1);
@@ -383,23 +379,149 @@ int32_t tmqCommitCb2(void* param, const SDataBuf* pBuf, int32_t code) {
   if (pParamSet->async) {
     // call async cb func
     if (pParamSet->automatic && pParamSet->tmq->commitCb) {
-      pParamSet->tmq->commitCb(pParamSet->tmq, pParamSet->rspErr, NULL, pParamSet->tmq->commitCbUserParam);
+      pParamSet->tmq->commitCb(pParamSet->tmq, pParamSet->rspErr, pParamSet->tmq->commitCbUserParam);
     } else if (!pParamSet->automatic && pParamSet->userCb) {
       // sem post
-      pParamSet->userCb(pParamSet->tmq, pParamSet->rspErr, NULL, pParamSet->userParam);
+      pParamSet->userCb(pParamSet->tmq, pParamSet->rspErr, pParamSet->userParam);
     }
+  } else {
+    tsem_post(&pParamSet->rspSem);
   }

+#if 0
   taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree);
   taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree);
+#endif
   }

   return 0;
 }
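`tmqCommitCb2()` above fans one logical commit out to many vgroups and counts responses down; the last response wakes the synchronous committer through the semaphore. A self-contained sketch of that fan-out/fan-in, with stand-in types in place of `SMqCommitCbParamSet`:

```c
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
  atomic_int waitingRspNum;  // stands in for SMqCommitCbParamSet.waitingRspNum
  sem_t      rspSem;         // stands in for SMqCommitCbParamSet.rspSem
  int        async;
} CommitParamSet;

static void onCommitRsp(CommitParamSet *set) {
  // atomic_fetch_sub returns the old value, so the last responder sees 1.
  if (atomic_fetch_sub(&set->waitingRspNum, 1) != 1) return;
  if (!set->async) sem_post(&set->rspSem);  // wake the synchronous committer
}

static void *vnodeRspThread(void *arg) {
  onCommitRsp((CommitParamSet *)arg);
  return NULL;
}

int main(void) {
  CommitParamSet set = {.async = 0};
  atomic_init(&set.waitingRspNum, 3);
  sem_init(&set.rspSem, 0, 0);

  pthread_t t[3];
  for (int i = 0; i < 3; ++i) pthread_create(&t[i], NULL, vnodeRspThread, &set);

  sem_wait(&set.rspSem);  // mirrors tmqCommitInner2's !async path
  puts("all vgroup commits acknowledged");

  for (int i = 0; i < 3; ++i) pthread_join(t[i], NULL);
  sem_destroy(&set.rspSem);
  return 0;
}
```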

-int32_t tmqCommitInner2(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int8_t automatic, int8_t async,
-                        tmq_commit_cb* userCb, void* userParam) {
+int32_t tmqCommitInner2(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t async, tmq_commit_cb* userCb,
+                        void* userParam) {
   int32_t code = -1;

+  if (msg != NULL) {
+    SMqRspObj* pRspObj = (SMqRspObj*)msg;
+    if (!TD_RES_TMQ(pRspObj)) {
+      return TSDB_CODE_TMQ_INVALID_MSG;
+    }
+
+    SMqCommitCbParamSet* pParamSet = taosMemoryCalloc(1, sizeof(SMqCommitCbParamSet));
+    if (pParamSet == NULL) {
+      terrno = TSDB_CODE_OUT_OF_MEMORY;
+      return -1;
+    }
+    pParamSet->tmq = tmq;
+    pParamSet->automatic = automatic;
+    pParamSet->async = async;
+    pParamSet->freeOffsets = 1;
+    pParamSet->userCb = userCb;
+    pParamSet->userParam = userParam;
+    tsem_init(&pParamSet->rspSem, 0, 0);
+
+    for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
+      SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
+      if (strcmp(pTopic->topicName, pRspObj->topic) == 0) {
+        for (int32_t j = 0; j < taosArrayGetSize(pTopic->vgs); j++) {
+          SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
+          if (pVg->vgId == pRspObj->vgId) {
+            if (pVg->currentOffset < 0 || pVg->committedOffset == pVg->currentOffset) {
+              tscDebug("consumer %ld skip commit for topic %s vg %d, current offset is %ld, committed offset is %ld",
+                       tmq->consumerId, pTopic->topicName, pVg->vgId, pVg->currentOffset, pVg->committedOffset);
+
+              return 0;
+            }
+
+            STqOffset* pOffset = taosMemoryCalloc(1, sizeof(STqOffset));
+            if (pOffset == NULL) {
+              terrno = TSDB_CODE_OUT_OF_MEMORY;
+              return -1;
+            }
+            pOffset->type = TMQ_OFFSET__LOG;
+            pOffset->version = pVg->currentOffset;
+
+            int32_t groupLen = strlen(tmq->groupId);
+            memcpy(pOffset->subKey, tmq->groupId, groupLen);
+            pOffset->subKey[groupLen] = TMQ_SEPARATOR;
+            strcpy(pOffset->subKey + groupLen + 1, pTopic->topicName);
+
+            int32_t len;
+            int32_t code;
+            tEncodeSize(tEncodeSTqOffset, pOffset, len, code);
+            if (code < 0) {
+              ASSERT(0);
+            }
+            void* buf = taosMemoryCalloc(1, sizeof(SMsgHead) + len);
+            ((SMsgHead*)buf)->vgId = htonl(pVg->vgId);
+
+            void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
+
+            SEncoder encoder;
+            tEncoderInit(&encoder, abuf, len);
+            tEncodeSTqOffset(&encoder, pOffset);
+
+            // build param
+            SMqCommitCbParam2* pParam = taosMemoryCalloc(1, sizeof(SMqCommitCbParam2));
+            pParam->params = pParamSet;
+            pParam->pOffset = pOffset;
+
+            // build send info
+            SMsgSendInfo* pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
+            if (pMsgSendInfo == NULL) {
+              // TODO
+              continue;
+            }
+            pMsgSendInfo->msgInfo = (SDataBuf){
+                .pData = buf,
+                .len = sizeof(SMsgHead) + len,
+                .handle = NULL,
+            };
+
+            tscDebug("consumer %ld commit offset of %s on vg %d, offset is %ld", tmq->consumerId, pOffset->subKey,
+                     pVg->vgId, pOffset->version);
+
+            // TODO: put into cb
+            pVg->committedOffset = pVg->currentOffset;
+
+            pMsgSendInfo->requestId = generateRequestId();
+            pMsgSendInfo->requestObjRefId = 0;
+            pMsgSendInfo->param = pParam;
+            pMsgSendInfo->fp = tmqCommitCb2;
+            pMsgSendInfo->msgType = TDMT_VND_MQ_COMMIT_OFFSET;
+            // send msg
+
+            int64_t transporterId = 0;
+            asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, pMsgSendInfo);
+            pParamSet->waitingRspNum++;
+            pParamSet->totalRspNum++;
+          }
+        }
+      }
+    }
+
+    if (pParamSet->totalRspNum == 0) {
+      tsem_destroy(&pParamSet->rspSem);
+      taosMemoryFree(pParamSet);
+      return 0;
+    }
+
+    if (!async) {
+      tsem_wait(&pParamSet->rspSem);
+      code = pParamSet->rspErr;
+      tsem_destroy(&pParamSet->rspSem);
+    } else {
+      code = 0;
+    }
+
+    if (code != 0 && async) {
+      if (automatic) {
+        tmq->commitCb(tmq, code, tmq->commitCbUserParam);
+      } else {
+        userCb(tmq, code, userParam);
+      }
+    }
+
+    return 0;
+  }
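The commit path above builds the subscribe key as the consumer group id, a separator byte, then the topic name, all in one buffer. A small sketch of that layout; the separator value used here is a stand-in, the real `TMQ_SEPARATOR` is defined elsewhere in tmq.c:

```c
#include <stdio.h>
#include <string.h>

#define TMQ_SEPARATOR ':'  // assumed value, for illustration only

// Mirrors the memcpy/strcpy sequence used when building STqOffset.subKey.
static void buildSubKey(char *subKey, const char *groupId, const char *topic) {
  size_t groupLen = strlen(groupId);
  memcpy(subKey, groupId, groupLen);
  subKey[groupLen] = TMQ_SEPARATOR;
  strcpy(subKey + groupLen + 1, topic);
}

int main(void) {
  char subKey[256];
  buildSubKey(subKey, "cgroup1", "topic_meters");
  printf("subKey: %s\n", subKey);  // cgroup1:topic_meters
  return 0;
}
```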
   SMqCommitCbParamSet* pParamSet = taosMemoryCalloc(1, sizeof(SMqCommitCbParamSet));
   if (pParamSet == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     return -1;
@@ -415,17 +537,35 @@ int32_t tmqCommitInner2(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int8
   for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
     SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
+
+    tscDebug("consumer %ld begin commit for topic %s, vgNum %d", tmq->consumerId, pTopic->topicName,
+             (int32_t)taosArrayGetSize(pTopic->vgs));
+
     for (int32_t j = 0; j < taosArrayGetSize(pTopic->vgs); j++) {
-      SMqClientVg* pVg = taosArrayGet(pTopic->vgs, i);
-      STqOffset*   pOffset = taosMemoryCalloc(1, sizeof(STqOffset));
+      SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
+
+      tscDebug("consumer %ld begin commit for topic %s, vgId %d", tmq->consumerId, pTopic->topicName, pVg->vgId);
+
+      /*if (pVg->currentOffset < 0) {*/
+      if (pVg->currentOffset < 0 || pVg->committedOffset == pVg->currentOffset) {
+        tscDebug("consumer %ld skip commit for topic %s vg %d, current offset is %ld, committed offset is %ld",
+                 tmq->consumerId, pTopic->topicName, pVg->vgId, pVg->currentOffset, pVg->committedOffset);
+
+        continue;
+      }
+
+      STqOffset* pOffset = taosMemoryCalloc(1, sizeof(STqOffset));
       if (pOffset == NULL) {
         terrno = TSDB_CODE_OUT_OF_MEMORY;
         return -1;
       }
-      int32_t tlen = strlen(tmq->groupId);
-      memcpy(pOffset->subKey, tmq->groupId, tlen);
-      pOffset->subKey[tlen] = TMQ_SEPARATOR;
-      strcpy(pOffset->subKey + tlen + 1, pTopic->topicName);
+      pOffset->type = TMQ_OFFSET__LOG;
+      pOffset->version = pVg->currentOffset;
+
+      int32_t groupLen = strlen(tmq->groupId);
+      memcpy(pOffset->subKey, tmq->groupId, groupLen);
+      pOffset->subKey[groupLen] = TMQ_SEPARATOR;
+      strcpy(pOffset->subKey + groupLen + 1, pTopic->topicName);
+
       int32_t len;
       int32_t code;
       tEncodeSize(tEncodeSTqOffset, pOffset, len, code);
@@ -454,25 +594,36 @@ int32_t tmqCommitInner2(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int8
       }
       pMsgSendInfo->msgInfo = (SDataBuf){
           .pData = buf,
-          .len = len,
+          .len = sizeof(SMsgHead) + len,
           .handle = NULL,
       };

+      tscDebug("consumer %ld commit offset of %s on vg %d, offset is %ld", tmq->consumerId, pOffset->subKey, pVg->vgId,
+               pOffset->version);
+
+      // TODO: put into cb
+      pVg->committedOffset = pVg->currentOffset;
+
       pMsgSendInfo->requestId = generateRequestId();
       pMsgSendInfo->requestObjRefId = 0;
       pMsgSendInfo->param = pParam;
       pMsgSendInfo->fp = tmqCommitCb2;
-      pMsgSendInfo->msgType = TDMT_MND_MQ_COMMIT_OFFSET;
+      pMsgSendInfo->msgType = TDMT_VND_MQ_COMMIT_OFFSET;
       // send msg

-      SEpSet epSet = getEpSet_s(&tmq->pTscObj->pAppInfo->mgmtEp);
       int64_t transporterId = 0;
-      asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, pMsgSendInfo);
+      asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, pMsgSendInfo);
       pParamSet->waitingRspNum++;
       pParamSet->totalRspNum++;
     }
   }

+  if (pParamSet->totalRspNum == 0) {
+    tsem_destroy(&pParamSet->rspSem);
+    taosMemoryFree(pParamSet);
+    return 0;
+  }
+
   if (!async) {
     tsem_wait(&pParamSet->rspSem);
     code = pParamSet->rspErr;
@@ -483,21 +634,24 @@ int32_t tmqCommitInner2(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int8

   if (code != 0 && async) {
     if (automatic) {
-      tmq->commitCb(tmq, code, NULL, tmq->commitCbUserParam);
+      tmq->commitCb(tmq, code, tmq->commitCbUserParam);
     } else {
-      userCb(tmq, code, NULL, userParam);
+      userCb(tmq, code, userParam);
     }
   }

+#if 0
   if (!async) {
     taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree);
     taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree);
   }
+#endif

   return 0;
 }

-int32_t tmqCommitInner(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int8_t automatic, int8_t async,
+#if 0
+int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t async,
                        tmq_commit_cb* userCb, void* userParam) {
   SMqCMCommitOffsetReq req;
   SArray*              pOffsets = NULL;
@@ -507,7 +661,7 @@ int32_t tmqCommitInner(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int8_
   int8_t  freeOffsets;
   int32_t code = -1;

-  if (offsets == NULL) {
+  if (msg == NULL) {
     freeOffsets = 1;
     pOffsets = taosArrayInit(0, sizeof(SMqOffset));
     for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
@@ -524,7 +678,7 @@ int32_t tmqCommitInner(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int8_
     }
   } else {
     freeOffsets = 0;
-    pOffsets = (SArray*)&offsets->container;
+    pOffsets = (SArray*)&msg->container;
   }

   req.num = (int32_t)pOffsets->size;
@@ -611,6 +765,7 @@ END:
   }
   return code;
 }
+#endif

 void tmqAssignDelayedHbTask(void* param, void* tmrId) {
   tmq_t* tmq = (tmq_t*)param;
@@ -648,7 +803,7 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
       tmqAskEp(tmq, true);
       taosTmrReset(tmqAssignDelayedHbTask, 1000, tmq, tmqMgmt.timer, &tmq->hbTimer);
     } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) {
-      tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
+      tmqCommitInner2(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
       taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer, &tmq->commitTimer);
     } else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) {
     } else {
@@ -689,7 +844,7 @@ int32_t tmqSubscribeCb(void* param, const SDataBuf* pMsg, int32_t code) {
   return 0;
 }

-tmq_resp_err_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
+int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
   if (*topics == NULL) {
     *topics = tmq_list_new();
   }
@@ -697,12 +852,12 @@ tmq_resp_err_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
     SMqClientTopic* topic = taosArrayGet(tmq->clientTopics, i);
     tmq_list_append(*topics, strchr(topic->topicName, '.') + 1);
   }
-  return TMQ_RESP_ERR__SUCCESS;
+  return 0;
 }

-tmq_resp_err_t tmq_unsubscribe(tmq_t* tmq) {
-  tmq_list_t*    lst = tmq_list_new();
-  tmq_resp_err_t rsp = tmq_subscribe(tmq, lst);
+int32_t tmq_unsubscribe(tmq_t* tmq) {
+  tmq_list_t* lst = tmq_list_new();
+  int32_t     rsp = tmq_subscribe(tmq, lst);
   tmq_list_destroy(lst);
   return rsp;
 }
@@ -818,11 +973,13 @@ FAIL:
   return NULL;
 }
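With `tmqCommitInner2()` accepting a `TAOS_RES`, commits can now be scoped to a single polled message, and the public entry points `tmq_commit_sync()`/`tmq_commit_async()` take that message directly (their new signatures appear later in this file). A hedged usage sketch; the consumer setup is assumed to exist elsewhere:

```c
#include <stdio.h>
#include "taos.h"

void poll_and_commit(tmq_t *tmq) {
  while (1) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 1000 /* ms */);
    if (msg == NULL) break;

    /* ... process the message ... */

    // Commit only this message's topic/vgroup offset; passing NULL instead
    // would commit every consumed vgroup, as before.
    int32_t code = tmq_commit_sync(tmq, msg);
    if (code != 0) {
      fprintf(stderr, "commit failed: %s\n", tmq_err2str(code));
    }
    taos_free_result(msg);
  }
}
```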
-tmq_resp_err_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int32_t async) {
-  return tmqCommitInner(tmq, offsets, 0, async, tmq->commitCb, tmq->commitCbUserParam);
+#if 0
+int32_t tmq_commit(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, int32_t async) {
+  return tmqCommitInner2(tmq, offsets, 0, async, tmq->commitCb, tmq->commitCbUserParam);
 }
+#endif

-tmq_resp_err_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
+int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
   const SArray* container = &topic_list->container;
   int32_t       sz = taosArrayGetSize(container);
   void*         buf = NULL;
@@ -862,7 +1019,7 @@ tmq_resp_err_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
   if (sendInfo == NULL) goto FAIL;

   SMqSubscribeCbParam param = {
-      .rspErr = TMQ_RESP_ERR__SUCCESS,
+      .rspErr = 0,
       .tmq = tmq,
   };

@@ -943,6 +1100,19 @@ int32_t tmqPollCb(void* param, const SDataBuf* pMsg, int32_t code) {
   if (code != 0) {
     tscWarn("msg discard from vg %d, epoch %d, code:%x", vgId, epoch, code);
     if (pMsg->pData) taosMemoryFree(pMsg->pData);
+    if (code == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
+      SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM);
+      if (pRspWrapper == NULL) {
+        taosMemoryFree(pMsg->pData);
+        tscWarn("msg discard from vg %d, epoch %d since out of memory", vgId, epoch);
+        goto CREATE_MSG_FAIL;
+      }
+      pRspWrapper->tmqRspType = TMQ_MSG_TYPE__END_RSP;
+      /*pRspWrapper->vgHandle = pVg;*/
+      /*pRspWrapper->topicHandle = pTopic;*/
+      taosWriteQitem(tmq->mqueue, pRspWrapper);
+      tsem_post(&tmq->rspSem);
+    }
     goto CREATE_MSG_FAIL;
   }

@@ -1017,14 +1187,13 @@ bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
     if (pTopicCur->vgs) {
       int32_t vgNumCur = taosArrayGetSize(pTopicCur->vgs);
       tscDebug("consumer %ld new vg num: %d", tmq->consumerId, vgNumCur);
-      if (vgNumCur == 0) break;
       for (int32_t j = 0; j < vgNumCur; j++) {
         SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j);
         sprintf(vgKey, "%s:%d", pTopicCur->topicName, pVgCur->vgId);
-        tscDebug("consumer %ld epoch %d vg %d build %s", tmq->consumerId, epoch, pVgCur->vgId, vgKey);
+        tscDebug("consumer %ld epoch %d vg %d vgKey is %s, offset is %ld", tmq->consumerId, epoch, pVgCur->vgId, vgKey,
+                 pVgCur->currentOffset);
         taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffset, sizeof(int64_t));
       }
-      break;
     }
   }

@@ -1032,7 +1201,6 @@ bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
     SMqClientTopic topic = {0};
     SMqSubTopicEp* pTopicEp = taosArrayGet(pRsp->topics, i);
     topic.schema = pTopicEp->schema;
-    taosHashClear(pHash);

     topic.topicName = strdup(pTopicEp->topic);
     tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN);
@@ -1049,7 +1217,8 @@ bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
         offset = *pOffset;
       }

-      tscDebug("consumer %ld(epoch %d) offset of vg %d updated to %ld", tmq->consumerId, epoch, pVgEp->vgId, offset);
+      tscDebug("consumer %ld(epoch %d) offset of vg %d updated to %ld, vgKey is %s", tmq->consumerId, epoch,
+               pVgEp->vgId, offset, vgKey);
       SMqClientVg clientVg = {
           .pollCnt = 0,
           .currentOffset = offset,
@@ -1076,6 +1245,7 @@ bool tmqUpdateEp2(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
   return set;
 }

+#if 1
 bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
   /*printf("call update ep %d\n", epoch);*/
   bool set = false;
@@ -1160,6 +1330,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
   atomic_store_32(&tmq->epoch, epoch);
   return set;
 }
+#endif

 int32_t tmqAskEpCb(void* param, const SDataBuf* pMsg, int32_t code) {
   SMqAskEpCbParam* pParam = (SMqAskEpCbParam*)param;
@@ -1186,7 +1357,7 @@ int32_t tmqAskEpCb(void* param, const SDataBuf* pMsg, int32_t code) {
     tDecodeSMqAskEpRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &rsp);
     /*printf("rsp epoch %ld sz %ld\n", rsp.epoch, rsp.topics->size);*/
     /*printf("tmq epoch %ld sz %ld\n", tmq->epoch, tmq->clientTopics->size);*/
-    tmqUpdateEp(tmq, head->epoch, &rsp);
+    tmqUpdateEp2(tmq, head->epoch, &rsp);
     tDeleteSMqAskEpRsp(&rsp);
   } else {
     SMqAskEpRspWrapper* pWrapper = taosAllocateQitem(sizeof(SMqAskEpRspWrapper), DEF_QITEM);
@@ -1283,7 +1454,8 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
   return code;
 }

-tmq_resp_err_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) {
+#if 0
+int32_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) {
   const SMqOffset* pOffset = &offset->offset;
   if (strcmp(pOffset->cgroup, tmq->groupId) != 0) {
     return TMQ_RESP_ERR__FAIL;
@@ -1305,16 +1477,17 @@ tmq_resp_err_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) {
   }
   return TMQ_RESP_ERR__FAIL;
 }
+#endif

 SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) {
   int64_t reqOffset;
   if (pVg->currentOffset >= 0) {
     reqOffset = pVg->currentOffset;
   } else {
-    if (tmq->resetOffsetCfg == TMQ_CONF__RESET_OFFSET__NONE) {
-      tscError("unable to poll since no committed offset but reset offset is set to none");
-      return NULL;
-    }
+    /*if (tmq->resetOffsetCfg == TMQ_CONF__RESET_OFFSET__NONE) {*/
+    /*tscError("unable to poll since no committed offset but reset offset is set to none");*/
+    /*return NULL;*/
+    /*}*/
     reqOffset = tmq->resetOffsetCfg;
   }

@@ -1326,10 +1499,10 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic*
   /*strcpy(pReq->topic, pTopic->topicName);*/
   /*strcpy(pReq->cgroup, tmq->groupId);*/

-  int32_t tlen = strlen(tmq->groupId);
-  memcpy(pReq->subKey, tmq->groupId, tlen);
-  pReq->subKey[tlen] = TMQ_SEPARATOR;
-  strcpy(pReq->subKey + tlen + 1, pTopic->topicName);
+  int32_t groupLen = strlen(tmq->groupId);
+  memcpy(pReq->subKey, tmq->groupId, groupLen);
+  pReq->subKey[groupLen] = TMQ_SEPARATOR;
+  strcpy(pReq->subKey + groupLen + 1, pTopic->topicName);

   pReq->withTbName = tmq->withTbName;
   pReq->timeout = timeout;
@@ -1440,7 +1613,7 @@ int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* pReset)
     if (rspWrapper->epoch > atomic_load_32(&tmq->epoch)) {
       SMqAskEpRspWrapper* pEpRspWrapper = (SMqAskEpRspWrapper*)rspWrapper;
       SMqAskEpRsp*        rspMsg = &pEpRspWrapper->msg;
-      tmqUpdateEp(tmq, rspWrapper->epoch, rspMsg);
+      tmqUpdateEp2(tmq, rspWrapper->epoch, rspMsg);
       /*tmqClearUnhandleMsg(tmq);*/
       *pReset = true;
     } else {
@@ -1462,7 +1635,11 @@ SMqRspObj* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
       if (rspWrapper == NULL) return NULL;
     }

-    if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_RSP) {
+    if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__END_RSP) {
+      taosFreeQitem(rspWrapper);
+      terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
+      return NULL;
+    } else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_RSP) {
       SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
       /*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
       int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
@@ -1523,6 +1700,8 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
   rspObj = tmqHandleAllRsp(tmq, timeout, false);
   if (rspObj) {
     return (TAOS_RES*)rspObj;
+  } else if (terrno == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
+    return NULL;
   }
   if (timeout != -1) {
     int64_t endTime = taosGetTimestampMs();
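The poll path above turns a missing committed offset into a normal NULL return with `terrno` set, rather than an error buried inside the request builder. A hedged caller-side sketch; it assumes `taos_errno(NULL)` reflects the thread-local `terrno` and that the error define is visible via the internal taoserror.h, both of which are assumptions rather than documented contracts:

```c
#include <stdio.h>
#include "taos.h"

void poll_once(tmq_t *tmq) {
  TAOS_RES *msg = tmq_consumer_poll(tmq, 500 /* ms */);
  if (msg == NULL) {
    // With reset.offset set to "none" and nothing committed, the vnode
    // replies TSDB_CODE_TQ_NO_COMMITTED_OFFSET and poll surfaces it here.
    int32_t err = taos_errno(NULL);  // assumed to mirror terrno
    if (err != 0) fprintf(stderr, "poll stopped: %s\n", tmq_err2str(err));
    return;
  }
  /* ... consume the message ... */
  taos_free_result(msg);
}
```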
@@ -1539,10 +1718,10 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
   }
 }

-tmq_resp_err_t tmq_consumer_close(tmq_t* tmq) {
+int32_t tmq_consumer_close(tmq_t* tmq) {
   if (tmq->status == TMQ_CONSUMER_STATUS__READY) {
-    tmq_resp_err_t rsp = tmq_commit_sync(tmq, NULL);
-    if (rsp != TMQ_RESP_ERR__SUCCESS) {
+    int32_t rsp = tmq_commit_sync(tmq, NULL);
+    if (rsp != 0) {
       return rsp;
     }

@@ -1550,18 +1729,18 @@ tmq_resp_err_t tmq_consumer_close(tmq_t* tmq) {
     rsp = tmq_subscribe(tmq, lst);
     tmq_list_destroy(lst);

-    if (rsp != TMQ_RESP_ERR__SUCCESS) {
+    if (rsp != 0) {
       return rsp;
     }
   }
   // TODO: free resources
-  return TMQ_RESP_ERR__SUCCESS;
+  return 0;
 }

-const char* tmq_err2str(tmq_resp_err_t err) {
-  if (err == TMQ_RESP_ERR__SUCCESS) {
+const char* tmq_err2str(int32_t err) {
+  if (err == 0) {
     return "success";
-  } else if (err == TMQ_RESP_ERR__FAIL) {
+  } else if (err == -1) {
     return "fail";
   } else {
     return tstrerror(err);
@@ -1607,10 +1786,8 @@ const char* tmq_get_table_name(TAOS_RES* res) {
   return NULL;
 }

-void tmq_commit_async(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets, tmq_commit_cb* cb, void* param) {
-  tmqCommitInner(tmq, offsets, 0, 1, cb, param);
+void tmq_commit_async(tmq_t* tmq, const TAOS_RES* msg, tmq_commit_cb* cb, void* param) {
+  tmqCommitInner2(tmq, msg, 0, 1, cb, param);
 }

-tmq_resp_err_t tmq_commit_sync(tmq_t* tmq, const tmq_topic_vgroup_list_t* offsets) {
-  return tmqCommitInner(tmq, offsets, 0, 0, NULL, NULL);
-}
+int32_t tmq_commit_sync(tmq_t* tmq, const TAOS_RES* msg) { return tmqCommitInner2(tmq, msg, 0, 0, NULL, NULL); }
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 3b0cb3b6aa..e7b6342150 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -77,7 +77,7 @@ static const SSysDbTableSchema userDBSchema[] = {
     {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
     {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
     {.name = "strict", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
-    {.name = "duration", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+    {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
     {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
     {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
    {.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
@@ -220,7 +220,8 @@ static const SSysDbTableSchema transSchema[] = {
     {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
     {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
     {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
-    {.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
     {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
     {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
     {.name = "last_action_info",
@@ -302,7 +303,7 @@ static const SSysDbTableSchema offsetSchema[] = {
 };

 static const SSysDbTableSchema querySchema[] = {
-    {.name = "query_id", .bytes = 26 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "query_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
     {.name = "req_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
     {.name = "connId", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
     {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
@@ -317,6 +318,24 @@ static const SSysDbTableSchema querySchema[] = {
     {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
 };

+static const SSysDbTableSchema appSchema[] = {
+    {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+    {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+    {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "show_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
+    {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+};
+
 static const SSysTableMeta perfsMeta[] = {
     {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)},
     {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema)},
@@ -327,6 +346,7 @@ static const SSysTableMeta perfsMeta[] = {
     {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema)},
     {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema)},
     {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
+    {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)}
 };

 void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) {
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index 712b4fcf42..3c3d3e953d 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -1713,6 +1713,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
 }

 char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId) {
+  ASSERT(stbName[0] != 0);
   SArray* tags = taosArrayInit(0, sizeof(void*));
   SSmlKv* pTag = taosMemoryCalloc(1, sizeof(SSmlKv));
   pTag->key = "group_id";
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index fbb4f78425..7457fe7eb6 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -51,15 +51,16 @@ int32_t tsNumOfShmThreads = 1;
 int32_t tsNumOfRpcThreads = 1;
 int32_t tsNumOfCommitThreads = 2;
 int32_t tsNumOfTaskQueueThreads = 1;
-int32_t tsNumOfMnodeQueryThreads = 1;
+int32_t tsNumOfMnodeQueryThreads = 2;
+int32_t tsNumOfMnodeFetchThreads = 1;
 int32_t tsNumOfMnodeReadThreads = 1;
 int32_t tsNumOfVnodeQueryThreads = 2;
-int32_t tsNumOfVnodeFetchThreads = 2;
+int32_t tsNumOfVnodeFetchThreads = 1;
 int32_t tsNumOfVnodeWriteThreads = 2;
 int32_t tsNumOfVnodeSyncThreads = 2;
 int32_t tsNumOfVnodeMergeThreads = 2;
 int32_t tsNumOfQnodeQueryThreads = 2;
-int32_t tsNumOfQnodeFetchThreads = 2;
+int32_t tsNumOfQnodeFetchThreads = 1;
 int32_t tsNumOfSnodeSharedThreads = 2;
 int32_t tsNumOfSnodeUniqueThreads = 2;
@@ -417,8 +418,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
   tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 1);
   if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 1, 1024, 0) != 0) return -1;

-  tsNumOfVnodeFetchThreads = tsNumOfCores / 2;
-  tsNumOfVnodeFetchThreads = TRANGE(tsNumOfVnodeFetchThreads, 2, 4);
+  tsNumOfVnodeFetchThreads = TRANGE(tsNumOfVnodeFetchThreads, 1, 1);
   if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1024, 0) != 0) return -1;

   tsNumOfVnodeWriteThreads = tsNumOfCores;
@@ -437,8 +437,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
   tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 1);
   if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1;

-  tsNumOfQnodeFetchThreads = tsNumOfCores / 2;
-  tsNumOfQnodeFetchThreads = TRANGE(tsNumOfQnodeFetchThreads, 2, 4);
+  tsNumOfQnodeFetchThreads = TRANGE(tsNumOfQnodeFetchThreads, 1, 1);
   if (cfgAddInt32(pCfg, "numOfQnodeFetchThreads", tsNumOfQnodeFetchThreads, 1, 1024, 0) != 0) return -1;

   tsNumOfSnodeSharedThreads = tsNumOfCores / 4;
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index a5acede1b0..99e7d1ea05 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -191,13 +191,25 @@ static int32_t tSerializeSClientHbReq(SEncoder *pEncoder, const SClientHbReq *pR
   if (tEncodeSClientHbKey(pEncoder, &pReq->connKey) < 0) return -1;

   if (pReq->connKey.connType == CONN_TYPE__QUERY) {
+    if (tEncodeI64(pEncoder, pReq->app.appId) < 0) return -1;
+    if (tEncodeI32(pEncoder, pReq->app.pid) < 0) return -1;
+    if (tEncodeCStr(pEncoder, pReq->app.name) < 0) return -1;
+    if (tEncodeI64(pEncoder, pReq->app.startTime) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.numOfInsertsReq) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.numOfInsertRows) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.insertElapsedTime) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.insertBytes) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.fetchBytes) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.queryElapsedTime) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.numOfSlowQueries) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.totalRequests) < 0) return -1;
+    if (tEncodeU64(pEncoder, pReq->app.summary.currentRequests) < 0) return -1;
+
     int32_t queryNum = 0;
     if (pReq->query) {
       queryNum = 1;
       if (tEncodeI32(pEncoder, queryNum) < 0) return -1;
       if (tEncodeU32(pEncoder, pReq->query->connId) < 0) return -1;
-      if (tEncodeI32(pEncoder, pReq->query->pid) < 0) return -1;
-      if (tEncodeCStr(pEncoder, pReq->query->app) < 0) return -1;

       int32_t num = taosArrayGetSize(pReq->query->queryDesc);
       if (tEncodeI32(pEncoder, num) < 0) return -1;
@@ -209,7 +221,6 @@ static int32_t tSerializeSClientHbReq(SEncoder *pEncoder, const SClientHbReq *pR
         if (tEncodeI64(pEncoder, desc->useconds) < 0) return -1;
         if (tEncodeI64(pEncoder, desc->stime) < 0) return -1;
         if (tEncodeI64(pEncoder, desc->reqRid) < 0) return -1;
-        if (tEncodeI32(pEncoder, desc->pid) < 0) return -1;
         if (tEncodeI8(pEncoder, desc->stableQuery) < 0) return -1;
         if (tEncodeCStr(pEncoder, desc->fqdn) < 0) return -1;
         if (tEncodeI32(pEncoder, desc->subPlanNum) < 0) return -1;
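The heartbeat now carries a per-app summary, and the decoder in the next hunk reads back exactly the fields the encoder writes, in the same order; any drift between the two silently corrupts every later field. A self-contained sketch of that round-trip invariant, with a stand-in struct and codec in place of SEncoder/SDecoder:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Stand-in for SAppClusterSummary; field names follow the hunks above.
typedef struct {
  uint64_t numOfInsertsReq, numOfInsertRows, insertElapsedTime, insertBytes;
  uint64_t fetchBytes, queryElapsedTime, numOfSlowQueries;
  uint64_t totalRequests, currentRequests;
} AppSummary;

// The real codec writes each field with tEncodeU64; this sketch collapses
// that to one memcpy purely to demonstrate the symmetry requirement.
static size_t encodeSummary(uint8_t *buf, const AppSummary *s) {
  memcpy(buf, s, sizeof(*s));
  return sizeof(*s);
}

static size_t decodeSummary(const uint8_t *buf, AppSummary *s) {
  memcpy(s, buf, sizeof(*s));  // must mirror the encode order exactly
  return sizeof(*s);
}

int main(void) {
  AppSummary in = {.totalRequests = 42, .currentRequests = 1}, out;
  uint8_t buf[sizeof(AppSummary)];
  encodeSummary(buf, &in);
  decodeSummary(buf, &out);
  printf("roundtrip totalRequests=%llu\n", (unsigned long long)out.totalRequests);
  return 0;
}
```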
@@ -243,14 +254,26 @@ static int32_t tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq)
   if (tDecodeSClientHbKey(pDecoder, &pReq->connKey) < 0) return -1;

   if (pReq->connKey.connType == CONN_TYPE__QUERY) {
+    if (tDecodeI64(pDecoder, &pReq->app.appId) < 0) return -1;
+    if (tDecodeI32(pDecoder, &pReq->app.pid) < 0) return -1;
+    if (tDecodeCStrTo(pDecoder, pReq->app.name) < 0) return -1;
+    if (tDecodeI64(pDecoder, &pReq->app.startTime) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.numOfInsertsReq) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.numOfInsertRows) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.insertElapsedTime) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.insertBytes) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.fetchBytes) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.queryElapsedTime) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.numOfSlowQueries) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.totalRequests) < 0) return -1;
+    if (tDecodeU64(pDecoder, &pReq->app.summary.currentRequests) < 0) return -1;
+
     int32_t queryNum = 0;
     if (tDecodeI32(pDecoder, &queryNum) < 0) return -1;
     if (queryNum) {
       pReq->query = taosMemoryCalloc(1, sizeof(*pReq->query));
       if (NULL == pReq->query) return -1;
       if (tDecodeU32(pDecoder, &pReq->query->connId) < 0) return -1;
-      if (tDecodeI32(pDecoder, &pReq->query->pid) < 0) return -1;
-      if (tDecodeCStrTo(pDecoder, pReq->query->app) < 0) return -1;

       int32_t num = 0;
       if (tDecodeI32(pDecoder, &num) < 0) return -1;
@@ -265,7 +288,6 @@ static int32_t tDeserializeSClientHbReq(SDecoder *pDecoder, SClientHbReq *pReq)
         if (tDecodeI64(pDecoder, &desc.useconds) < 0) return -1;
         if (tDecodeI64(pDecoder, &desc.stime) < 0) return -1;
         if (tDecodeI64(pDecoder, &desc.reqRid) < 0) return -1;
-        if (tDecodeI32(pDecoder, &desc.pid) < 0) return -1;
         if (tDecodeI8(pDecoder, (int8_t*)&desc.stableQuery) < 0) return -1;
         if (tDecodeCStrTo(pDecoder, desc.fqdn) < 0) return -1;
         if (tDecodeI32(pDecoder, &desc.subPlanNum) < 0) return -1;
@@ -3410,7 +3432,7 @@ int32_t tSerializeSKillConnReq(void *buf, int32_t bufLen, SKillConnReq *pReq) {
   tEncoderInit(&encoder, buf, bufLen);

   if (tStartEncode(&encoder) < 0) return -1;
-  if (tEncodeI32(&encoder, pReq->connId) < 0) return -1;
+  if (tEncodeU32(&encoder, pReq->connId) < 0) return -1;
   tEndEncode(&encoder);

   int32_t tlen = encoder.pos;
@@ -3423,7 +3445,7 @@ int32_t tDeserializeSKillConnReq(void *buf, int32_t bufLen, SKillConnReq *pReq)
   tDecoderInit(&decoder, buf, bufLen);

   if (tStartDecode(&decoder) < 0) return -1;
-  if (tDecodeI32(&decoder, &pReq->connId) < 0) return -1;
+  if (tDecodeU32(&decoder, &pReq->connId) < 0) return -1;
   tEndDecode(&decoder);

   tDecoderClear(&decoder);
diff --git a/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h b/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h
index c5c3d76f1e..a4c37dd334 100644
--- a/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h
+++ b/source/dnode/mgmt/mgmt_mnode/inc/mmInt.h
@@ -30,6 +30,7 @@ typedef struct SMnodeMgmt {
   const char   *path;
   const char   *name;
   SSingleWorker queryWorker;
+  SSingleWorker fetchWorker;
   SSingleWorker readWorker;
   SSingleWorker writeWorker;
   SSingleWorker syncWorker;
@@ -57,6 +58,7 @@ int32_t mmPutMsgToWriteQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t mmPutMsgToSyncQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t mmPutMsgToReadQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t mmPutMsgToQueryQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
+int32_t mmPutMsgToFetchQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t mmPutMsgToMonitorQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg);
 int32_t mmPutMsgToQueue(SMnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc);
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c
index 493f4ab85f..88f667ce9e 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c
@@ -122,6 +122,13 @@ int32_t mmPutMsgToQueryQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
   return mmPutMsgToWorker(pMgmt, &pMgmt->queryWorker, pMsg);
 }

+int32_t mmPutMsgToFetchQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
+  pMsg->info.node = pMgmt->pMnode;
+  return mmPutMsgToWorker(pMgmt, &pMgmt->fetchWorker, pMsg);
+}
+
 int32_t mmPutMsgToMonitorQueue(SMnodeMgmt *pMgmt, SRpcMsg *pMsg) {
   return mmPutMsgToWorker(pMgmt, &pMgmt->monitorWorker, pMsg);
 }
@@ -135,6 +142,9 @@ int32_t mmPutMsgToQueue(SMnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
     case QUERY_QUEUE:
       pWorker = &pMgmt->queryWorker;
       break;
+    case FETCH_QUEUE:
+      pWorker = &pMgmt->fetchWorker;
+      break;
     case READ_QUEUE:
       pWorker = &pMgmt->readWorker;
       break;
@@ -167,6 +177,18 @@ int32_t mmStartWorker(SMnodeMgmt *pMgmt) {
     return -1;
   }

+  SSingleWorkerCfg fCfg = {
+      .min = tsNumOfMnodeFetchThreads,
+      .max = tsNumOfMnodeFetchThreads,
+      .name = "mnode-fetch",
+      .fp = (FItem)mmProcessRpcMsg,
+      .param = pMgmt,
+  };
+  if (tSingleWorkerInit(&pMgmt->fetchWorker, &fCfg) != 0) {
+    dError("failed to start mnode-fetch worker since %s", terrstr());
+    return -1;
+  }
+
   SSingleWorkerCfg rCfg = {
       .min = tsNumOfMnodeReadThreads,
       .max = tsNumOfMnodeReadThreads,
@@ -224,6 +246,7 @@ void mmStopWorker(SMnodeMgmt *pMgmt) {
   tSingleWorkerCleanup(&pMgmt->monitorWorker);
   tSingleWorkerCleanup(&pMgmt->queryWorker);
+  tSingleWorkerCleanup(&pMgmt->fetchWorker);
   tSingleWorkerCleanup(&pMgmt->readWorker);
   tSingleWorkerCleanup(&pMgmt->writeWorker);
   tSingleWorkerCleanup(&pMgmt->syncWorker);
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 4daeeaa9bf..8963f6be39 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -124,7 +124,8 @@ typedef struct {
   int32_t lastErrorNo;
   tmsg_t  lastMsgType;
   SEpSet  lastEpset;
-  char    dbname[TSDB_DB_FNAME_LEN];
+  char    dbname1[TSDB_DB_FNAME_LEN];
+  char    dbname2[TSDB_DB_FNAME_LEN];
   int32_t startFunc;
   int32_t stopFunc;
   int32_t paramLen;
diff --git a/source/dnode/mnode/impl/inc/mndInt.h b/source/dnode/mnode/impl/inc/mndInt.h
index cc9bc5b634..37bae1d5c0 100644
--- a/source/dnode/mnode/impl/inc/mndInt.h
+++ b/source/dnode/mnode/impl/inc/mndInt.h
@@ -67,7 +67,8 @@ typedef struct {
 } SShowMgmt;

 typedef struct {
-  SCacheObj *cache;
+  SCacheObj *connCache;
+  SCacheObj *appCache;
 } SProfileMgmt;

 typedef struct {
diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h
index 0175e29a77..bc2d5c82b1 100644
--- a/source/dnode/mnode/impl/inc/mndTrans.h
+++ b/source/dnode/mnode/impl/inc/mndTrans.h
@@ -68,7 +68,7 @@ int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
 int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
 void    mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
 void    mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen);
-void    mndTransSetDbName(STrans *pTrans, const char *dbname);
+void    mndTransSetDbName(STrans *pTrans, const char *dbname1, const char *dbname2);
 void    mndTransSetSerial(STrans *pTrans);

 int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans);
diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c
index 2eeff9cb33..345464399e 100644
--- a/source/dnode/mnode/impl/src/mndDb.c
+++ b/source/dnode/mnode/impl/src/mndDb.c
@@ -477,7 +477,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
   mDebug("trans:%d, used to create db:%s", pTrans->id, pCreate->db);

-  mndTransSetDbName(pTrans, dbObj.name);
+  mndTransSetDbName(pTrans, dbObj.name, NULL);
   if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
   if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
   if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
@@ -668,7 +668,7 @@ static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *p
   mDebug("trans:%d, used to alter db:%s", pTrans->id, pOld->name);

   int32_t code = -1;
-  mndTransSetDbName(pTrans, pOld->name);
+  mndTransSetDbName(pTrans, pOld->name, NULL);
   if (mndSetAlterDbRedoLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
   if (mndSetAlterDbCommitLogs(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
   if (mndSetAlterDbRedoActions(pMnode, pTrans, pOld, pNew) != 0) goto _OVER;
@@ -921,7 +921,7 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
   if (pTrans == NULL) goto _OVER;
   mDebug("trans:%d, used to drop db:%s", pTrans->id, pDb->name);

-  mndTransSetDbName(pTrans, pDb->name);
+  mndTransSetDbName(pTrans, pDb->name, NULL);
   if (mndSetDropDbRedoLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
   if (mndSetDropDbCommitLogs(pMnode, pTrans, pDb) != 0) goto _OVER;
@@ -1391,11 +1391,13 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in
   pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
   colDataAppend(pColInfo, rows, (const char *)strict, false);

-  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
-  colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.daysPerFile, false);
-
   char    tmp[128] = {0};
   int32_t len = 0;
+  len = sprintf(&tmp[VARSTR_HEADER_SIZE], "%dm", pDb->cfg.daysPerFile);
+  varDataSetLen(tmp, len);
+  pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+  colDataAppend(pColInfo, rows, (const char *)tmp, false);
+
   if (pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep1 || pDb->cfg.daysToKeep0 > pDb->cfg.daysToKeep2) {
     len = sprintf(&tmp[VARSTR_HEADER_SIZE], "%dm,%dm,%dm", pDb->cfg.daysToKeep1, pDb->cfg.daysToKeep2,
                   pDb->cfg.daysToKeep0);
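`dumpDbInfoData()` above switches the duration column from a raw int to a var-string formatted as `<minutes>m`, matching the new schema in systable.c. The sketch below mirrors the var-string convention with local stand-ins for the real `VARSTR_HEADER_SIZE` and `varDataSetLen` macros:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Local stand-ins that mirror TDengine's var-string macros: a 2-byte length
// header precedes the payload.
#define VARSTR_HEADER_SIZE sizeof(uint16_t)
#define varDataSetLen(v, l) (*(uint16_t *)(v) = (uint16_t)(l))

int main(void) {
  char tmp[32] = {0};
  int  daysPerFile = 14400;  // minutes, i.e. 10 days

  // Format the payload after the header, then stamp the length.
  int len = sprintf(tmp + VARSTR_HEADER_SIZE, "%dm", daysPerFile);
  varDataSetLen(tmp, len);

  printf("varstr payload: %.*s (len %u)\n", len, tmp + VARSTR_HEADER_SIZE,
         *(uint16_t *)tmp);
  return 0;
}
```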
"" : offlineReason[pDnode->offlineReason]); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, b, false); + taosMemoryFreeClear(b); numOfRows++; sdbRelease(pSdb, pDnode); diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index aa8bc33906..c18727bf84 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -548,8 +548,7 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) { if (IsReq(pMsg) && pMsg->msgType != TDMT_MND_MQ_TIMER && pMsg->msgType != TDMT_MND_TELEM_TIMER && pMsg->msgType != TDMT_MND_TRANS_TIMER) { - mError("msg:%p, failed to check mnode state since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, - TMSG_INFO(pMsg->msgType)); + mError("msg:%p, failed to check mnode state since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pMsg->msgType)); SEpSet epSet = {0}; mndGetMnodeEpSet(pMsg->info.node, &epSet); @@ -572,7 +571,8 @@ static int32_t mndCheckMsgContent(SRpcMsg *pMsg) { if (!IsReq(pMsg)) return 0; if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0; - mError("msg:%p, failed to check msg content, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); + mError("msg:%p, failed to check msg, cont:%p contLen:%d, app:%p type:%s", pMsg, pMsg->pCont, pMsg->contLen, + pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); terrno = TSDB_CODE_INVALID_MSG_LEN; return -1; } @@ -596,7 +596,7 @@ int32_t mndProcessRpcMsg(SRpcMsg *pMsg) { if (code == TSDB_CODE_ACTION_IN_PROGRESS) { mTrace("msg:%p, won't response immediately since in progress", pMsg); } else if (code == 0) { - mTrace("msg:%p, successfully processed and response", pMsg); + mTrace("msg:%p, successfully processed", pMsg); } else { mError("msg:%p, failed to process since %s, app:%p type:%s", pMsg, terrstr(), pMsg->info.ahandle, TMSG_INFO(pMsg->msgType)); diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 39d21aad49..fd80679316 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -43,6 +43,16 @@ typedef struct { SArray *pQueries; // SArray } SConnObj; +typedef struct { + int64_t appId; + uint32_t ip; + int32_t pid; + char name[TSDB_APP_NAME_LEN]; + int64_t startTime; + SAppClusterSummary summary; + int64_t lastAccessTimeMs; +} SAppObj; + static SConnObj *mndCreateConn(SMnode *pMnode, const char *user, int8_t connType, uint32_t ip, uint16_t port, int32_t pid, const char *app, int64_t startTime); static void mndFreeConn(SConnObj *pConn); @@ -57,14 +67,24 @@ static int32_t mndProcessKillConnReq(SRpcMsg *pReq); static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndCancelGetNextQuery(SMnode *pMnode, void *pIter); +static void mndFreeApp(SAppObj *pApp); +static int32_t mndRetrieveApps(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); +static void mndCancelGetNextApp(SMnode *pMnode, void *pIter); int32_t mndInitProfile(SMnode *pMnode) { SProfileMgmt *pMgmt = &pMnode->profileMgmt; // in ms - int32_t connCheckTime = tsShellActivityTimer * 2 * 1000; - pMgmt->cache = taosCacheInit(TSDB_DATA_TYPE_INT, connCheckTime, true, (__cache_free_fn_t)mndFreeConn, "conn"); - if (pMgmt->cache == NULL) { + int32_t checkTime = tsShellActivityTimer * 2 * 1000; + pMgmt->connCache = taosCacheInit(TSDB_DATA_TYPE_UINT, checkTime, true, 
diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c
index 39d21aad49..fd80679316 100644
--- a/source/dnode/mnode/impl/src/mndProfile.c
+++ b/source/dnode/mnode/impl/src/mndProfile.c
@@ -43,6 +43,16 @@ typedef struct {
   SArray *pQueries;  // SArray
 } SConnObj;

+typedef struct {
+  int64_t            appId;
+  uint32_t           ip;
+  int32_t            pid;
+  char               name[TSDB_APP_NAME_LEN];
+  int64_t            startTime;
+  SAppClusterSummary summary;
+  int64_t            lastAccessTimeMs;
+} SAppObj;
+
 static SConnObj *mndCreateConn(SMnode *pMnode, const char *user, int8_t connType, uint32_t ip, uint16_t port,
                                int32_t pid, const char *app, int64_t startTime);
 static void      mndFreeConn(SConnObj *pConn);
@@ -57,14 +67,24 @@ static int32_t   mndProcessKillConnReq(SRpcMsg *pReq);
 static int32_t   mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
 static int32_t   mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
 static void      mndCancelGetNextQuery(SMnode *pMnode, void *pIter);
+static void      mndFreeApp(SAppObj *pApp);
+static int32_t   mndRetrieveApps(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
+static void      mndCancelGetNextApp(SMnode *pMnode, void *pIter);

 int32_t mndInitProfile(SMnode *pMnode) {
   SProfileMgmt *pMgmt = &pMnode->profileMgmt;

   // in ms
-  int32_t connCheckTime = tsShellActivityTimer * 2 * 1000;
-  pMgmt->cache = taosCacheInit(TSDB_DATA_TYPE_INT, connCheckTime, true, (__cache_free_fn_t)mndFreeConn, "conn");
-  if (pMgmt->cache == NULL) {
+  int32_t checkTime = tsShellActivityTimer * 2 * 1000;
+  pMgmt->connCache = taosCacheInit(TSDB_DATA_TYPE_UINT, checkTime, true, (__cache_free_fn_t)mndFreeConn, "conn");
+  if (pMgmt->connCache == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    mError("failed to alloc profile cache since %s", terrstr());
+    return -1;
+  }
+
+  pMgmt->appCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, checkTime, true, (__cache_free_fn_t)mndFreeApp, "app");
+  if (pMgmt->appCache == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     mError("failed to alloc profile cache since %s", terrstr());
     return -1;
@@ -79,15 +99,22 @@ int32_t mndInitProfile(SMnode *pMnode) {
   mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CONNS, mndCancelGetNextConn);
   mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_QUERIES, mndRetrieveQueries);
   mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_QUERIES, mndCancelGetNextQuery);
+  mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_APPS, mndRetrieveApps);
+  mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_APPS, mndCancelGetNextApp);

   return 0;
 }

 void mndCleanupProfile(SMnode *pMnode) {
   SProfileMgmt *pMgmt = &pMnode->profileMgmt;
-  if (pMgmt->cache != NULL) {
-    taosCacheCleanup(pMgmt->cache);
-    pMgmt->cache = NULL;
+  if (pMgmt->connCache != NULL) {
+    taosCacheCleanup(pMgmt->connCache);
+    pMgmt->connCache = NULL;
+  }
+
+  if (pMgmt->appCache != NULL) {
+    taosCacheCleanup(pMgmt->appCache);
+    pMgmt->appCache = NULL;
   }
 }

@@ -97,7 +124,7 @@ static SConnObj *mndCreateConn(SMnode *pMnode, const char *user, int8_t connType
   char    connStr[255] = {0};
   int32_t len = snprintf(connStr, sizeof(connStr), "%s%d%d%d%s", user, ip, port, pid, app);
-  int32_t connId = mndGenerateUid(connStr, len);
+  uint32_t connId = mndGenerateUid(connStr, len);
   if (startTime == 0) startTime = taosGetTimestampMs();

   SConnObj connObj = {.id = connId,
@@ -118,7 +145,7 @@ static SConnObj *mndCreateConn(SMnode *pMnode, const char *user, int8_t connType
   tstrncpy(connObj.app, app, TSDB_APP_NAME_LEN);

   int32_t   keepTime = tsShellActivityTimer * 3;
-  SConnObj *pConn = taosCachePut(pMgmt->cache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), keepTime * 1000);
+  SConnObj *pConn = taosCachePut(pMgmt->connCache, &connId, sizeof(uint32_t), &connObj, sizeof(connObj), keepTime * 1000);
   if (pConn == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
     mError("conn:%u, failed to put into cache since %s, user:%s", connId, terrstr(), user);
     return NULL;
@@ -140,14 +167,13 @@ static void mndFreeConn(SConnObj *pConn) {

 static SConnObj *mndAcquireConn(SMnode *pMnode, uint32_t connId) {
   SProfileMgmt *pMgmt = &pMnode->profileMgmt;
-  SConnObj     *pConn = taosCacheAcquireByKey(pMgmt->cache, &connId, sizeof(connId));
+  SConnObj     *pConn = taosCacheAcquireByKey(pMgmt->connCache, &connId, sizeof(connId));
   if (pConn == NULL) {
     mDebug("conn:%u, already destroyed", connId);
     return NULL;
   }

-  int32_t keepTime = tsShellActivityTimer * 3;
-  pConn->lastAccessTimeMs = keepTime * 1000 + (uint64_t)taosGetTimestampMs();
+  pConn->lastAccessTimeMs = taosGetTimestampMs();

   mTrace("conn:%u, acquired from cache, data:%p", pConn->id, pConn);
   return pConn;
@@ -158,7 +184,7 @@ static void mndReleaseConn(SMnode *pMnode, SConnObj *pConn) {
   mTrace("conn:%u, released from cache, data:%p", pConn->id, pConn);

   SProfileMgmt *pMgmt = &pMnode->profileMgmt;
-  taosCacheRelease(pMgmt->cache, (void **)&pConn, false);
+  taosCacheRelease(pMgmt->connCache, (void **)&pConn, false);
 }

 void *mndGetNextConn(SMnode *pMnode, SCacheIter *pIter) {
@@ -276,6 +302,77 @@ static int32_t mndSaveQueryList(SConnObj *pConn, SQueryHbReqBasic *pBasic) {

   return TSDB_CODE_SUCCESS;
 }
+static SAppObj *mndCreateApp(SMnode *pMnode, uint32_t clientIp, SAppHbReq *pReq) {
+  SProfileMgmt *pMgmt = &pMnode->profileMgmt;
+
+  SAppObj app;
+  app.appId = pReq->appId;
+  app.ip = clientIp;
+  app.pid = pReq->pid;
+  strcpy(app.name, pReq->name);
+  app.startTime = pReq->startTime;
+  memcpy(&app.summary, &pReq->summary, sizeof(pReq->summary));
+  app.lastAccessTimeMs = taosGetTimestampMs();
+
+  int32_t  keepTime = tsShellActivityTimer * 3;
+  SAppObj *pApp = taosCachePut(pMgmt->appCache, &pReq->appId, sizeof(pReq->appId), &app, sizeof(app), keepTime * 1000);
+  if (pApp == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    mError("failed to put app %" PRIx64 " into cache since %s", pReq->appId, terrstr());
+    return NULL;
+  }
+
+  mTrace("app %" PRIx64 " is put into cache", pReq->appId);
+  return pApp;
+}
+
+static void mndFreeApp(SAppObj *pApp) {
+  mTrace("app %" PRIx64 " is destroyed", pApp->appId);
+}
+
+static SAppObj *mndAcquireApp(SMnode *pMnode, int64_t appId) {
+  SProfileMgmt *pMgmt = &pMnode->profileMgmt;
+
+  SAppObj *pApp = taosCacheAcquireByKey(pMgmt->appCache, &appId, sizeof(appId));
+  if (pApp == NULL) {
+    mDebug("app %" PRIx64 " not in cache", appId);
+    return NULL;
+  }
+
+  pApp->lastAccessTimeMs = (uint64_t)taosGetTimestampMs();
+
+  mTrace("app %" PRIx64 " acquired from cache", appId);
+  return pApp;
+}
+
+static void mndReleaseApp(SMnode *pMnode, SAppObj *pApp) {
+  if (pApp == NULL) return;
+  mTrace("release app %" PRIx64 " to cache", pApp->appId);
+
+  SProfileMgmt *pMgmt = &pMnode->profileMgmt;
+  taosCacheRelease(pMgmt->appCache, (void **)&pApp, false);
+}
+
+void *mndGetNextApp(SMnode *pMnode, SCacheIter *pIter) {
+  SAppObj *pApp = NULL;
+  bool     hasNext = taosCacheIterNext(pIter);
+  if (hasNext) {
+    size_t dataLen = 0;
+    pApp = taosCacheIterGetData(pIter, &dataLen);
+  } else {
+    taosCacheDestroyIter(pIter);
+  }
+
+  return pApp;
+}
+
+static void mndCancelGetNextApp(SMnode *pMnode, void *pIter) {
+  if (pIter != NULL) {
+    taosCacheDestroyIter(pIter);
+  }
+}
+
 static SClientHbRsp *mndMqHbBuildRsp(SMnode *pMnode, SClientHbReq *pReq) {
 #if 0
   SClientHbRsp* pRsp = taosMemoryMalloc(sizeof(SClientHbRsp));
@@ -341,25 +438,48 @@ static SClientHbRsp *mndMqHbBuildRsp(SMnode *pMnode, SClientHbReq *pReq) {
   return NULL;
 }

+static int32_t mndUpdateAppInfo(SMnode *pMnode, SClientHbReq *pHbReq, SRpcConnInfo *connInfo) {
+  SAppHbReq *pReq = &pHbReq->app;
+  SAppObj   *pApp = mndAcquireApp(pMnode, pReq->appId);
+  if (pApp == NULL) {
+    pApp = mndCreateApp(pMnode, connInfo->clientIp, pReq);
+    if (pApp == NULL) {
+      mError("failed to create new app %" PRIx64 " since %s", pReq->appId, terrstr());
+      return -1;
+    } else {
+      mDebug("a new app %" PRIx64 " created", pReq->appId);
+      mndReleaseApp(pMnode, pApp);
+      return TSDB_CODE_SUCCESS;
+    }
+  }
+
+  memcpy(&pApp->summary, &pReq->summary, sizeof(pReq->summary));
+
+  mndReleaseApp(pMnode, pApp);
+
+  return TSDB_CODE_SUCCESS;
+}
+
 static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHbReq *pHbReq,
                                         SClientHbBatchRsp *pBatchRsp) {
   SProfileMgmt *pMgmt = &pMnode->profileMgmt;
   SClientHbRsp  hbRsp = {.connKey = pHbReq->connKey, .status = 0, .info = NULL, .query = NULL};
+  SRpcConnInfo  connInfo = pMsg->info.conn;
+
+  mndUpdateAppInfo(pMnode, pHbReq, &connInfo);

   if (pHbReq->query) {
     SQueryHbReqBasic *pBasic = pHbReq->query;

-    SRpcConnInfo connInfo = pMsg->info.conn;
-
     SConnObj *pConn = mndAcquireConn(pMnode, pBasic->connId);
     if (pConn == NULL) {
       pConn = mndCreateConn(pMnode, connInfo.user, CONN_TYPE__QUERY, connInfo.clientIp, connInfo.clientPort,
-                            pBasic->pid, pBasic->app, 0);
+                            pHbReq->app.pid, pHbReq->app.name, 0);
       if (pConn == NULL) {
         mError("user:%s, conn:%u is freed and failed to create new since %s", connInfo.user, pBasic->connId, terrstr());
         return -1;
       } else {
-        mDebug("user:%s, conn:%u is freed and create a new conn:%u", connInfo.user, pBasic->connId, pConn->id);
+        mDebug("user:%s, conn:%u is freed, will create a new conn:%u", connInfo.user, pBasic->connId, pConn->id);
       }
     }

@@ -516,17 +636,28 @@ static int32_t mndProcessKillQueryReq(SRpcMsg *pReq) {
     return -1;
   }

-  mInfo("kill query msg is received, queryId:%d", killReq.queryId);
+  mInfo("kill query msg is received, queryId:%s", killReq.queryStrId);
+  int32_t  connId = 0;
+  uint64_t queryId = 0;
+  char    *p = strchr(killReq.queryStrId, ':');
+  if (NULL == p) {
+    mError("invalid query id %s", killReq.queryStrId);
+    terrno = TSDB_CODE_MND_INVALID_QUERY_ID;
+    return -1;
+  }
+  *p = 0;
+  connId = taosStr2Int32(killReq.queryStrId, NULL, 16);
+  queryId = taosStr2UInt64(p + 1, NULL, 16);

-  SConnObj *pConn = taosCacheAcquireByKey(pMgmt->cache, &killReq.connId, sizeof(int32_t));
+  SConnObj *pConn = taosCacheAcquireByKey(pMgmt->connCache, &connId, sizeof(int32_t));
   if (pConn == NULL) {
-    mError("connId:%d, failed to kill queryId:%d, conn not exist", killReq.connId, killReq.queryId);
+    mError("connId:%x, failed to kill queryId:%" PRIx64 ", conn not exist", connId, queryId);
     terrno = TSDB_CODE_MND_INVALID_CONN_ID;
     return -1;
   } else {
-    mInfo("connId:%d, queryId:%d is killed by user:%s", killReq.connId, killReq.queryId, pReq->info.conn.user);
-    pConn->killId = killReq.queryId;
-    taosCacheRelease(pMgmt->cache, (void **)&pConn, false);
+    mInfo("connId:%x, queryId:%" PRIx64 " is killed by user:%s", connId, queryId, pReq->info.conn.user);
+    pConn->killId = queryId;
+    taosCacheRelease(pMgmt->connCache, (void **)&pConn, false);
     return 0;
   }
 }
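`mndProcessKillQueryReq()` above replaces the integer query id with a composite `<connId>:<queryId>` hex string, split at the colon and parsed base-16. A self-contained sketch of the same parse, using standard-library stand-ins for `taosStr2Int32`/`taosStr2UInt64`; the example id is illustrative:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parseQueryStrId(const char *queryStrId, int32_t *connId,
                           uint64_t *queryId) {
  char buf[64];
  snprintf(buf, sizeof(buf), "%s", queryStrId);

  char *p = strchr(buf, ':');
  if (p == NULL) return -1;  // maps to TSDB_CODE_MND_INVALID_QUERY_ID
  *p = 0;

  *connId = (int32_t)strtol(buf, NULL, 16);   // stand-in for taosStr2Int32
  *queryId = strtoull(p + 1, NULL, 16);       // stand-in for taosStr2UInt64
  return 0;
}

int main(void) {
  int32_t  connId;
  uint64_t queryId;
  if (parseQueryStrId("5a3f:9c0000000000001", &connId, &queryId) == 0) {
    printf("connId:%x queryId:%" PRIx64 "\n", connId, queryId);
  }
  return 0;
}
```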
@@ -550,15 +681,15 @@ static int32_t mndProcessKillConnReq(SRpcMsg *pReq) {
     return -1;
   }

-  SConnObj *pConn = taosCacheAcquireByKey(pMgmt->cache, &killReq.connId, sizeof(int32_t));
+  SConnObj *pConn = taosCacheAcquireByKey(pMgmt->connCache, &killReq.connId, sizeof(uint32_t));
   if (pConn == NULL) {
-    mError("connId:%d, failed to kill connection, conn not exist", killReq.connId);
+    mError("connId:%u, failed to kill connection, conn not exist", killReq.connId);
     terrno = TSDB_CODE_MND_INVALID_CONN_ID;
     return -1;
   } else {
-    mInfo("connId:%d, is killed by user:%s", killReq.connId, pReq->info.conn.user);
+    mInfo("connId:%u, is killed by user:%s", killReq.connId, pReq->info.conn.user);
     pConn->killed = 1;
-    taosCacheRelease(pMgmt->cache, (void **)&pConn, false);
+    taosCacheRelease(pMgmt->connCache, (void **)&pConn, false);
     return TSDB_CODE_SUCCESS;
   }
 }
@@ -572,7 +703,7 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl

   if (pShow->pIter == NULL) {
     SProfileMgmt *pMgmt = &pMnode->profileMgmt;
-    pShow->pIter = taosCacheCreateIter(pMgmt->cache);
+    pShow->pIter = taosCacheCreateIter(pMgmt->connCache);
   }

   while (numOfRows < rows) {
@@ -628,7 +759,7 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p

   if (pShow->pIter == NULL) {
     SProfileMgmt *pMgmt = &pMnode->profileMgmt;
-    pShow->pIter = taosCacheCreateIter(pMgmt->cache);
+    pShow->pIter = taosCacheCreateIter(pMgmt->connCache);
   }

   while (numOfRows < rows) {
@@ -667,7 +798,7 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
       colDataAppend(pColInfo, numOfRows, (const char *)app, false);

       pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
-      colDataAppend(pColInfo, numOfRows, (const char *)&pQuery->pid, false);
+      colDataAppend(pColInfo, numOfRows, (const char *)&pConn->pid, false);

       char user[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
       STR_TO_VARSTR(user, pConn->user);
@@ -721,6 +852,86 @@ static int32_t mndRetrieveQueries(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
   return numOfRows;
 }

+static int32_t mndRetrieveApps(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {
+  SMnode  *pMnode = pReq->info.node;
+  SSdb    *pSdb = pMnode->pSdb;
+  int32_t  numOfRows = 0;
+  int32_t  cols = 0;
+  SAppObj *pApp = NULL;
+
+  if (pShow->pIter == NULL) {
+    SProfileMgmt *pMgmt = &pMnode->profileMgmt;
+    pShow->pIter = taosCacheCreateIter(pMgmt->appCache);
+  }
+
+  while (numOfRows < rows) {
+    pApp = mndGetNextApp(pMnode, pShow->pIter);
+    if (pApp == NULL) {
+      pShow->pIter = NULL;
+      break;
+    }
+
+    cols = 0;
+
+    SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->appId, false);
+
+    char ip[TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE] = {0};
+    sprintf(&ip[VARSTR_HEADER_SIZE], "%s", taosIpStr(pApp->ip));
+    varDataLen(ip) = strlen(&ip[VARSTR_HEADER_SIZE]);
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)ip, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->pid, false);
+
+    char name[TSDB_APP_NAME_LEN + 6 + VARSTR_HEADER_SIZE] = {0};
+    sprintf(&name[VARSTR_HEADER_SIZE], "%s", pApp->name);
+    varDataLen(name) = strlen(&name[VARSTR_HEADER_SIZE]);
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)name, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->startTime, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.numOfInsertsReq, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.numOfInsertRows, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.insertElapsedTime, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.insertBytes, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.fetchBytes, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.queryElapsedTime, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.numOfSlowQueries, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.totalRequests, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->summary.currentRequests, false);
+
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&pApp->lastAccessTimeMs, false);
+
+    numOfRows++;
+  }
+
+  pShow->numOfRows += numOfRows;
+  return numOfRows;
+}
+
 static void mndCancelGetNextQuery(SMnode *pMnode, void *pIter) {
   if (pIter != NULL) {
     taosCacheDestroyIter(pIter);
mndGetNumOfConnections(SMnode *pMnode) { SProfileMgmt *pMgmt = &pMnode->profileMgmt; - return taosCacheGetNumOfObj(pMgmt->cache); + return taosCacheGetNumOfObj(pMgmt->connCache); } diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 6f8fc748c2..39bb6798aa 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -105,7 +105,7 @@ int32_t mndPersistTaskDeployReq(STrans* pTrans, SStreamTask* pTask, const SEpSet int32_t size = encoder.pos; int32_t tlen = sizeof(SMsgHead) + size; tEncoderClear(&encoder); - void* buf = taosMemoryMalloc(tlen); + void* buf = taosMemoryCalloc(1, tlen); if (buf == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; @@ -157,6 +157,7 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, STrans* pTrans, SStreamObj* } sdbRelease(pMnode->pSdb, pDb); + memcpy(pTask->shuffleDispatcher.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN); SArray* pVgs = pTask->shuffleDispatcher.dbInfo.pVgroupInfos; int32_t sz = taosArrayGetSize(pVgs); SArray* sinkLv = taosArrayGetP(pStream->tasks, 0); @@ -166,6 +167,7 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, STrans* pTrans, SStreamObj* for (int32_t j = 0; j < sinkLvSize; j++) { SStreamTask* pLastLevelTask = taosArrayGetP(sinkLv, j); if (pLastLevelTask->nodeId == pVgInfo->vgId) { + ASSERT(pVgInfo->vgId > 0); pVgInfo->taskId = pLastLevelTask->taskId; ASSERT(pVgInfo->taskId != 0); break; diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index a6447ed405..fed6a5a721 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -104,6 +104,8 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_TOPICS; } else if (strncasecmp(name, TSDB_PERFS_TABLE_STREAMS, len) == 0) { type = TSDB_MGMT_TABLE_STREAMS; + } else if (strncasecmp(name, TSDB_PERFS_TABLE_APPS, len) == 0) { + type = TSDB_MGMT_TABLE_APPS; } else { // ASSERT(0); } diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index 023a28ce35..b6c387a9c8 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -609,7 +609,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea int32_t code = -1; STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq); if (pTrans == NULL) goto _OVER; - mndTransSetDbName(pTrans, pDb->name); + mndTransSetDbName(pTrans, pDb->name, NULL); mndTransSetSerial(pTrans); mDebug("trans:%d, used to create sma:%s", pTrans->id, pCreate->name); @@ -852,7 +852,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name); - mndTransSetDbName(pTrans, pDb->name); + mndTransSetDbName(pTrans, pDb->name, NULL); if (mndSetDropSmaRedoLogs(pMnode, pTrans, pSma) != 0) goto _OVER; if (mndSetDropSmaVgroupRedoLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 9a27251ddf..dd555ec974 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -759,7 +759,7 @@ _OVER: } int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) { - mndTransSetDbName(pTrans, pDb->name); + mndTransSetDbName(pTrans, pDb->name, NULL); if (mndSetCreateStbRedoLogs(pMnode, pTrans, pDb, 
pStb) != 0) return -1; if (mndSetCreateStbUndoLogs(pMnode, pTrans, pDb, pStb) != 0) return -1; if (mndSetCreateStbCommitLogs(pMnode, pTrans, pDb, pStb) != 0) return -1; @@ -1399,7 +1399,7 @@ static int32_t mndAlterStb(SMnode *pMnode, SRpcMsg *pReq, const SMAlterStbReq *p if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to alter stb:%s", pTrans->id, pAlter->name); - mndTransSetDbName(pTrans, pDb->name); + mndTransSetDbName(pTrans, pDb->name, NULL); if (needRsp) { void *pCont = NULL; @@ -1540,7 +1540,7 @@ static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *p if (pTrans == NULL) goto _OVER; mDebug("trans:%d, used to drop stb:%s", pTrans->id, pStb->name); - mndTransSetDbName(pTrans, pDb->name); + mndTransSetDbName(pTrans, pDb->name, NULL); if (mndSetDropStbRedoLogs(pMnode, pTrans, pStb) != 0) goto _OVER; if (mndSetDropStbCommitLogs(pMnode, pTrans, pStb) != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 8e82946d68..d432256f15 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -613,9 +613,9 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { goto _OVER; } - mndTransSetDbName(pTrans, createStreamReq.sourceDB); + mndTransSetDbName(pTrans, createStreamReq.sourceDB, NULL); // TODO - /*mndTransSetDbName(pTrans, streamObj.targetDb);*/ + /*mndTransSetDbName(pTrans, streamObj.targetDb, NULL);*/ mDebug("trans:%d, used to create stream:%s", pTrans->id, createStreamReq.name); // build stream obj from request diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 65a5d22bec..d2b7a61e83 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -403,7 +403,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOutputObj *pOutput) { STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pMsg); - mndTransSetDbName(pTrans, pOutput->pSub->dbName); + mndTransSetDbName(pTrans, pOutput->pSub->dbName, NULL); if (pTrans == NULL) return -1; // make txn: diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c index 8883431ca8..3e3850de1a 100644 --- a/source/dnode/mnode/impl/src/mndSync.c +++ b/source/dnode/mnode/impl/src/mndSync.c @@ -46,13 +46,14 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw); pMgmt->errCode = cbMeta.code; - mDebug("trans:%d, is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " role:%s raw:%p", transId, - pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, syncStr(cbMeta.state), pRaw); + mDebug("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64 + " role:%s raw:%p", + transId, pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex, syncStr(cbMeta.state), + pRaw); if (pMgmt->errCode == 0) { sdbWriteWithoutFree(pMnode->pSdb, pRaw); - sdbSetApplyIndex(pMnode->pSdb, cbMeta.index); - sdbSetApplyTerm(pMnode->pSdb, cbMeta.term); + sdbSetApplyInfo(pMnode->pSdb, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex); } if (pMgmt->transId == transId) { @@ -68,36 +69,19 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM mndReleaseTrans(pMnode, pTrans); } - if (cbMeta.index - 
sdbGetApplyIndex(pMnode->pSdb) > 100) { - SSnapshotMeta sMeta = {0}; - // if (syncGetSnapshotMeta(pMnode->syncMgmt.sync, &sMeta) == 0) { - if (syncGetSnapshotMetaByIndex(pMnode->syncMgmt.sync, cbMeta.index, &sMeta) == 0) { - sdbSetCurConfig(pMnode->pSdb, sMeta.lastConfigIndex); - } - sdbWriteFile(pMnode->pSdb); - } + sdbWriteFile(pMnode->pSdb, SDB_WRITE_DELTA); } } int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot) { SMnode *pMnode = pFsm->data; - pSnapshot->lastApplyIndex = sdbGetCommitIndex(pMnode->pSdb); - pSnapshot->lastApplyTerm = sdbGetCommitTerm(pMnode->pSdb); - pSnapshot->lastConfigIndex = sdbGetCurConfig(pMnode->pSdb); + sdbGetCommitInfo(pMnode->pSdb, &pSnapshot->lastApplyIndex, &pSnapshot->lastApplyTerm, &pSnapshot->lastConfigIndex); return 0; } void mndRestoreFinish(struct SSyncFSM *pFsm) { SMnode *pMnode = pFsm->data; - SSnapshotMeta sMeta = {0}; - // if (syncGetSnapshotMeta(pMnode->syncMgmt.sync, &sMeta) == 0) { - - SyncIndex snapshotIndex = sdbGetApplyIndex(pMnode->pSdb); - if (syncGetSnapshotMetaByIndex(pMnode->syncMgmt.sync, snapshotIndex, &sMeta) == 0) { - sdbSetCurConfig(pMnode->pSdb, sMeta.lastConfigIndex); - } - if (!pMnode->deploy) { mInfo("mnode sync restore finished, and will handle outstanding transactions"); mndTransPullup(pMnode); diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 8afb7ab354..9632c04f4c 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -566,7 +566,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { #endif STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq); - mndTransSetDbName(pTrans, pTopic->db); + mndTransSetDbName(pTrans, pTopic->db, NULL); if (pTrans == NULL) { mError("topic:%s, failed to drop since %s", pTopic->name, terrstr()); return -1; diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 858c6f956f..31a955b030 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -22,8 +22,8 @@ #include "mndSync.h" #include "mndUser.h" -#define TRANS_VER_NUMBER 1 -#define TRANS_ARRAY_SIZE 8 +#define TRANS_VER_NUMBER 1 +#define TRANS_ARRAY_SIZE 8 #define TRANS_RESERVE_SIZE 64 static SSdbRaw *mndTransActionEncode(STrans *pTrans); @@ -52,7 +52,7 @@ static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans); static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans); -static bool mndCantExecuteTransAction(SMnode *pMnode) { return !pMnode->deploy && !mndIsMaster(pMnode); } +static bool mndCannotExecuteTransAction(SMnode *pMnode) { return !pMnode->deploy && !mndIsMaster(pMnode); } static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans); static int32_t mndProcessTransReq(SRpcMsg *pReq); @@ -122,7 +122,8 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) { SDB_SET_INT8(pRaw, dataPos, pTrans->conflict, _OVER) SDB_SET_INT8(pRaw, dataPos, pTrans->exec, _OVER) SDB_SET_INT64(pRaw, dataPos, pTrans->createdTime, _OVER) - SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER) + SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname1, TSDB_DB_FNAME_LEN, _OVER) + SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname2, TSDB_DB_FNAME_LEN, _OVER) SDB_SET_INT32(pRaw, dataPos, pTrans->redoActionPos, _OVER) int32_t redoActionNum = 
taosArrayGetSize(pTrans->redoActions); @@ -270,7 +271,8 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) { pTrans->conflict = conflict; pTrans->exec = exec; SDB_GET_INT64(pRaw, dataPos, &pTrans->createdTime, _OVER) - SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_DB_FNAME_LEN, _OVER) + SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname1, TSDB_DB_FNAME_LEN, _OVER) + SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname2, TSDB_DB_FNAME_LEN, _OVER) SDB_GET_INT32(pRaw, dataPos, &pTrans->redoActionPos, _OVER) SDB_GET_INT32(pRaw, dataPos, &redoActionNum, _OVER) SDB_GET_INT32(pRaw, dataPos, &undoActionNum, _OVER) @@ -521,9 +523,10 @@ static int32_t mndTransActionUpdate(SSdb *pSdb, STrans *pOld, STrans *pNew) { } if (pOld->stage == TRN_STAGE_ROLLBACK) { - pOld->stage = TRN_STAGE_FINISHED; - mTrace("trans:%d, stage from rollback to finished since perform update action", pNew->id); + pOld->stage = TRN_STAGE_REDO_ACTION; + mTrace("trans:%d, stage from rollback to undoAction since perform update action", pNew->id); } + return 0; } @@ -649,7 +652,14 @@ void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void * pTrans->paramLen = paramLen; } -void mndTransSetDbName(STrans *pTrans, const char *dbname) { memcpy(pTrans->dbname, dbname, TSDB_DB_FNAME_LEN); } +void mndTransSetDbName(STrans *pTrans, const char *dbname1, const char *dbname2) { + if (dbname1 != NULL) { + memcpy(pTrans->dbname1, dbname1, TSDB_DB_FNAME_LEN); + } + if (dbname2 != NULL) { + memcpy(pTrans->dbname2, dbname2, TSDB_DB_FNAME_LEN); + } +} void mndTransSetSerial(STrans *pTrans) { pTrans->exec = TRN_EXEC_SERIAL; } @@ -661,7 +671,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { } sdbSetRawStatus(pRaw, SDB_STATUS_READY); - mDebug("trans:%d, sync to other mnodes", pTrans->id); + mDebug("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage)); int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id); if (code != 0) { mError("trans:%d, failed to sync since %s", pTrans->id, terrstr()); @@ -674,6 +684,12 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) { return 0; } +static bool mndCheckDbConflict(const char *db, STrans *pTrans) { + if (db[0] == 0) return false; + if (strcmp(db, pTrans->dbname1) == 0 || strcmp(db, pTrans->dbname2) == 0) return true; + return false; +} + static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) { STrans *pTrans = NULL; void *pIter = NULL; @@ -688,14 +704,21 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) { if (pNew->conflict == TRN_CONFLICT_GLOBAL) conflict = true; if (pNew->conflict == TRN_CONFLICT_DB) { if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true; - if (pTrans->conflict == TRN_CONFLICT_DB && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true; - if (pTrans->conflict == TRN_CONFLICT_DB_INSIDE && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true; + if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) { + if (mndCheckDbConflict(pNew->dbname1, pTrans)) conflict = true; + if (mndCheckDbConflict(pNew->dbname2, pTrans)) conflict = true; + } } if (pNew->conflict == TRN_CONFLICT_DB_INSIDE) { if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true; - if (pTrans->conflict == TRN_CONFLICT_DB && strcmp(pNew->dbname, pTrans->dbname) == 0) conflict = true; + if (pTrans->conflict == TRN_CONFLICT_DB) { + if (mndCheckDbConflict(pNew->dbname1, pTrans)) conflict = true; + if (mndCheckDbConflict(pNew->dbname2, pTrans)) conflict = true; + } } - mError("trans:%d, 
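
Aside: with transactions now recording up to two database names, the conflict rule becomes pairwise: each non-empty name of the incoming transaction is checked against both names of every running one. A condensed sketch of that rule, with a hypothetical fixed-size `Trans` holding only the two name fields:

```c
#include <stdbool.h>
#include <string.h>

#define DB_FNAME_LEN 64 /* stand-in for TSDB_DB_FNAME_LEN */

typedef struct {
  char dbname1[DB_FNAME_LEN];
  char dbname2[DB_FNAME_LEN];
} Trans; /* only the fields the conflict rule needs */

/* Mirrors mndCheckDbConflict: an empty name never conflicts; otherwise a
 * name conflicts if it matches either db recorded by the running trans. */
static bool dbConflicts(const char *db, const Trans *running) {
  if (db[0] == 0) return false;
  return strcmp(db, running->dbname1) == 0 || strcmp(db, running->dbname2) == 0;
}

/* A new trans conflicts if either of its (up to two) dbs hits the running one. */
static bool transConflicts(const Trans *pNew, const Trans *running) {
  return dbConflicts(pNew->dbname1, running) || dbConflicts(pNew->dbname2, running);
}

int main(void) {
  Trans running  = {.dbname1 = "1.db1", .dbname2 = ""};
  Trans incoming = {.dbname1 = "", .dbname2 = "1.db1"};
  return transConflicts(&incoming, &running) ? 0 : 1; /* conflicts: same db */
}
```
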
can't execute since conflict with trans:%d, db:%s", pNew->id, pTrans->id, pTrans->dbname); + + mError("trans:%d, can't execute since conflict with trans:%d, db1:%s db2:%s", pNew->id, pTrans->id, pTrans->dbname1, + pTrans->dbname2); sdbRelease(pMnode->pSdb, pTrans); } @@ -704,7 +727,7 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) { int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) { if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) { - if (strlen(pTrans->dbname) == 0) { + if (strlen(pTrans->dbname1) == 0 && strlen(pTrans->dbname2) == 0) { terrno = TSDB_CODE_MND_TRANS_CONFLICT; mError("trans:%d, failed to prepare conflict db not set", pTrans->id); return -1; @@ -915,7 +938,7 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) { if (pAction->msgSent) return 0; - if (mndCantExecuteTransAction(pMnode)) return -1; + if (mndCannotExecuteTransAction(pMnode)) return -1; int64_t signature = pTrans->id; signature = (signature << 32); @@ -1115,7 +1138,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans) pTrans->lastEpset = pAction->epSet; } - if (mndCantExecuteTransAction(pMnode)) break; + if (mndCannotExecuteTransAction(pMnode)) break; if (code == 0) { pTrans->code = 0; @@ -1158,7 +1181,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { code = mndTransExecuteRedoActions(pMnode, pTrans); } - if (mndCantExecuteTransAction(pMnode)) return false; + if (mndCannotExecuteTransAction(pMnode)) return false; if (code == 0) { pTrans->code = 0; @@ -1171,8 +1194,8 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { } else { pTrans->code = terrno; if (pTrans->policy == TRN_POLICY_ROLLBACK) { - pTrans->stage = TRN_STAGE_UNDO_ACTION; - mError("trans:%d, stage from redoAction to undoAction since %s", pTrans->id, terrstr()); + pTrans->stage = TRN_STAGE_ROLLBACK; + mError("trans:%d, stage from redoAction to rollback since %s", pTrans->id, terrstr()); continueExec = true; } else { pTrans->failedTimes++; @@ -1185,7 +1208,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) { } static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) { - if (mndCantExecuteTransAction(pMnode)) return false; + if (mndCannotExecuteTransAction(pMnode)) return false; bool continueExec = true; int32_t code = mndTransCommit(pMnode, pTrans); @@ -1197,16 +1220,9 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) { continueExec = true; } else { pTrans->code = terrno; - if (pTrans->policy == TRN_POLICY_ROLLBACK) { - pTrans->stage = TRN_STAGE_UNDO_ACTION; - mError("trans:%d, stage from commit to undoAction since %s, failedTimes:%d", pTrans->id, terrstr(), - pTrans->failedTimes); - continueExec = true; - } else { - pTrans->failedTimes++; - mError("trans:%d, stage keep on commit since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes); - continueExec = false; - } + pTrans->failedTimes++; + mError("trans:%d, stage keep on commit since %s, failedTimes:%d", pTrans->id, terrstr(), pTrans->failedTimes); + continueExec = false; } return continueExec; @@ -1235,11 +1251,9 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) { bool continueExec = true; int32_t code = mndTransExecuteUndoActions(pMnode, pTrans); - if (mndCantExecuteTransAction(pMnode)) return false; - if (code == 0) { - 
pTrans->stage = TRN_STAGE_ROLLBACK; - mDebug("trans:%d, stage from undoAction to rollback", pTrans->id); + pTrans->stage = TRN_STAGE_FINISHED; + mDebug("trans:%d, stage from undoAction to finished", pTrans->id); continueExec = true; } else if (code == TSDB_CODE_ACTION_IN_PROGRESS) { mDebug("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code)); @@ -1254,14 +1268,14 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) { } static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) { - if (mndCantExecuteTransAction(pMnode)) return false; + if (mndCannotExecuteTransAction(pMnode)) return false; bool continueExec = true; int32_t code = mndTransRollback(pMnode, pTrans); if (code == 0) { - pTrans->stage = TRN_STAGE_FINISHED; - mDebug("trans:%d, stage from rollback to finished", pTrans->id); + pTrans->stage = TRN_STAGE_UNDO_ACTION; + mDebug("trans:%d, stage from rollback to undoAction", pTrans->id); continueExec = true; } else { pTrans->failedTimes++; @@ -1309,12 +1323,12 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans) { case TRN_STAGE_COMMIT_ACTION: continueExec = mndTransPerformCommitActionStage(pMnode, pTrans); break; - case TRN_STAGE_UNDO_ACTION: - continueExec = mndTransPerformUndoActionStage(pMnode, pTrans); - break; case TRN_STAGE_ROLLBACK: continueExec = mndTransPerformRollbackStage(pMnode, pTrans); break; + case TRN_STAGE_UNDO_ACTION: + continueExec = mndTransPerformUndoActionStage(pMnode, pTrans); + break; case TRN_STAGE_FINISHED: continueExec = mndTransPerfromFinishedStage(pMnode, pTrans); break; @@ -1416,13 +1430,8 @@ void mndTransPullup(SMnode *pMnode) { mndReleaseTrans(pMnode, pTrans); } - SSnapshotMeta sMeta = {0}; - // if (syncGetSnapshotMeta(pMnode->syncMgmt.sync, &sMeta) == 0) { - SyncIndex snapshotIndex = sdbGetApplyIndex(pMnode->pSdb); - if (syncGetSnapshotMetaByIndex(pMnode->syncMgmt.sync, snapshotIndex, &sMeta) == 0) { - sdbSetCurConfig(pMnode->pSdb, sMeta.lastConfigIndex); - } - sdbWriteFile(pMnode->pSdb); + // todo, set to SDB_WRITE_DELTA + sdbWriteFile(pMnode->pSdb, 0); taosArrayDestroy(pArray); } @@ -1451,10 +1460,15 @@ static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)stage, false); - char dbname[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndGetDbStr(pTrans->dbname), pShow->pMeta->pSchemas[cols].bytes); + char dbname1[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(dbname1, mndGetDbStr(pTrans->dbname1), pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataAppend(pColInfo, numOfRows, (const char *)dbname, false); + colDataAppend(pColInfo, numOfRows, (const char *)dbname1, false); + + char dbname2[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_WITH_MAXSIZE_TO_VARSTR(dbname2, mndGetDbStr(pTrans->dbname2), pShow->pMeta->pSchemas[cols].bytes); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)dbname2, false); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->failedTimes, false); diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c index cec83d1af5..94ddbcd409 100644 --- a/source/dnode/mnode/impl/src/mndVgroup.c +++ b/source/dnode/mnode/impl/src/mndVgroup.c @@ -1058,7 +1058,7 @@ int32_t 
mndSetMoveVgroupsInfoToTrans(SMnode *pMnode, STrans *pTrans, int32_t del static int32_t mndAddIncVgroupReplicaToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t newDnodeId) { - mDebug("vgId:%d, will add 1 vnode, replica:%d, dnode:%d", pVgroup->vgId, pVgroup->replica, newDnodeId); + mDebug("vgId:%d, will add 1 vnode, replica:%d dnode:%d", pVgroup->vgId, pVgroup->replica, newDnodeId); SVnodeGid *pGid = &pVgroup->vnodeGid[pVgroup->replica]; pVgroup->replica++; @@ -1074,7 +1074,7 @@ static int32_t mndAddIncVgroupReplicaToTrans(SMnode *pMnode, STrans *pTrans, SDb static int32_t mndAddDecVgroupReplicaFromTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t delDnodeId) { - mDebug("vgId:%d, will remove 1 vnode, replica:%d, dnode:%d", pVgroup->vgId, pVgroup->replica, delDnodeId); + mDebug("vgId:%d, will remove 1 vnode, replica:%d dnode:%d", pVgroup->vgId, pVgroup->replica, delDnodeId); SVnodeGid *pGid = NULL; SVnodeGid delGid = {0}; @@ -1116,7 +1116,8 @@ static int32_t mndRedistributeVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, memcpy(&newVg, pVgroup, sizeof(SVgObj)); mInfo("vgId:%d, vgroup info before redistribute, replica:%d", newVg.vgId, newVg.replica); for (int32_t i = 0; i < newVg.replica; ++i) { - mInfo("vgId:%d, vnode:%d dnode:%d", newVg.vgId, i, newVg.vnodeGid[i].dnodeId); + mInfo("vgId:%d, vnode:%d dnode:%d role:%s", newVg.vgId, i, newVg.vnodeGid[i].dnodeId, + syncStr(newVg.vnodeGid[i].role)); } if (pNew1 != NULL && pOld1 != NULL) { @@ -1198,7 +1199,7 @@ static int32_t mndProcessRedistributeVgroupMsg(SRpcMsg *pReq) { goto _OVER; } - mInfo("vgId:%d, start to redistribute to dnode %d:%d:%d", req.vgId, req.dnodeId1, req.dnodeId2, req.dnodeId3); + mInfo("vgId:%d, start to redistribute vgroup to dnode %d:%d:%d", req.vgId, req.dnodeId1, req.dnodeId2, req.dnodeId3); if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_REDISTRIBUTE_VGROUP) != 0) goto _OVER; diff --git a/source/dnode/mnode/impl/test/profile/profile.cpp b/source/dnode/mnode/impl/test/profile/profile.cpp index 039700ae9e..e784a41d6f 100644 --- a/source/dnode/mnode/impl/test/profile/profile.cpp +++ b/source/dnode/mnode/impl/test/profile/profile.cpp @@ -295,8 +295,7 @@ TEST_F(MndTestProfile, 07_KillQueryMsg) { TEST_F(MndTestProfile, 08_KillQueryMsg_InvalidConn) { SKillQueryReq killReq = {0}; - killReq.connId = 2345; - killReq.queryId = 2345; + strcpy(killReq.queryStrId, "2345:2345"); int32_t contLen = tSerializeSKillQueryReq(NULL, 0, &killReq); void* pReq = rpcMallocCont(contLen); diff --git a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp index 43be55dd1d..bc118ee26e 100644 --- a/source/dnode/mnode/impl/test/sdb/sdbTest.cpp +++ b/source/dnode/mnode/impl/test/sdb/sdbTest.cpp @@ -493,8 +493,11 @@ TEST_F(MndTestSdb, 01_Write_Str) { ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2); ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1); ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 2); - sdbSetApplyIndex(pSdb, -1); - ASSERT_EQ(sdbGetApplyIndex(pSdb), -1); + sdbSetApplyInfo(pSdb, -1, -1, -1); + // int64_t index, config; + // int64_t term; + // sdbGetCommitInfo(pSdb, &index, &term, &config); + // ASSERT_EQ(index, -1); ASSERT_EQ(mnode.insertTimes, 2); ASSERT_EQ(mnode.deleteTimes, 0); @@ -700,11 +703,12 @@ TEST_F(MndTestSdb, 01_Write_Str) { } // write version - sdbSetApplyIndex(pSdb, 0); - sdbSetApplyIndex(pSdb, 1); - ASSERT_EQ(sdbGetApplyIndex(pSdb), 1); - ASSERT_EQ(sdbWriteFile(pSdb), 0); - ASSERT_EQ(sdbWriteFile(pSdb), 0); + sdbSetApplyInfo(pSdb, 0, 0, 0); 
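
Aside: the sdb changes in this patch collapse the per-field apply/commit accessors into `sdbSetApplyInfo`/`sdbGetCommitInfo` over (index, term, config) triples, and `sdbWriteFile` gains a `delta` argument so the snapshot is only rewritten once the apply index is far enough past the last commit (`SDB_WRITE_DELTA` is defined as 100). A condensed sketch of both ideas, with hypothetical names standing in for the `SSdb` fields:

```c
#include <stdint.h>
#include <stdio.h>

#define WRITE_DELTA 100 /* plays the role of SDB_WRITE_DELTA */

typedef struct {
  int64_t applyIndex, applyTerm, applyConfig;    /* advanced on every commit msg */
  int64_t commitIndex, commitTerm, commitConfig; /* what the on-disk file holds  */
} Store; /* condensed stand-in for the renamed SSdb fields */

static void setApplyInfo(Store *s, int64_t index, int64_t term, int64_t config) {
  s->applyIndex  = index;
  s->applyTerm   = term;
  s->applyConfig = config;
}

/* Mirrors the sdbWriteFile(pSdb, delta) gate: skip the flush while the apply
 * index has not moved at least `delta` entries past the committed one. */
static int writeFile(Store *s, int32_t delta) {
  if (s->applyIndex == s->commitIndex) return 0;
  if (s->applyIndex - s->commitIndex < delta) return 0;
  /* ... serialize tables here ... */
  s->commitIndex  = s->applyIndex; /* on success, apply info becomes commit info */
  s->commitTerm   = s->applyTerm;
  s->commitConfig = s->applyConfig;
  return 0;
}

int main(void) {
  Store s = {.applyIndex = -1, .commitIndex = -1};
  setApplyInfo(&s, 42, 3, 7);
  writeFile(&s, WRITE_DELTA); /* batched: too few new entries, nothing written */
  writeFile(&s, 0);           /* delta 0 forces the flush, as in sdbDeploy/cleanup */
  printf("commit index:%lld\n", (long long)s.commitIndex);
  return 0;
}
```
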
+ sdbSetApplyInfo(pSdb, 1, 0, 0); + // sdbGetApplyInfo(pSdb, &index, &term, &config); + // ASSERT_EQ(index, 1); + ASSERT_EQ(sdbWriteFile(pSdb, 0), 0); + ASSERT_EQ(sdbWriteFile(pSdb, 0), 0); sdbCleanup(pSdb); ASSERT_EQ(mnode.insertTimes, 7); @@ -772,7 +776,11 @@ TEST_F(MndTestSdb, 01_Read_Str) { ASSERT_EQ(sdbGetSize(pSdb, SDB_USER), 2); ASSERT_EQ(sdbGetMaxId(pSdb, SDB_USER), -1); ASSERT_EQ(sdbGetTableVer(pSdb, SDB_USER), 5); - ASSERT_EQ(sdbGetApplyIndex(pSdb), 1); + + int64_t index, config; + int64_t term; + sdbGetCommitInfo(pSdb, &index, &term, &config); + ASSERT_EQ(index, 1); ASSERT_EQ(mnode.insertTimes, 4); ASSERT_EQ(mnode.deleteTimes, 0); diff --git a/source/dnode/mnode/impl/test/trans/trans2.cpp b/source/dnode/mnode/impl/test/trans/trans2.cpp index 022c82c73d..aee8aa2748 100644 --- a/source/dnode/mnode/impl/test/trans/trans2.cpp +++ b/source/dnode/mnode/impl/test/trans/trans2.cpp @@ -128,7 +128,7 @@ class MndTestTrans2 : public ::testing::Test { mndTransSetCb(pTrans, TRANS_START_FUNC_TEST, TRANS_STOP_FUNC_TEST, param, strlen(param) + 1); if (pDb != NULL) { - mndTransSetDbName(pTrans, pDb->name); + mndTransSetDbName(pTrans, pDb->name, NULL); } int32_t code = mndTransPrepare(pMnode, pTrans); @@ -201,7 +201,7 @@ class MndTestTrans2 : public ::testing::Test { } if (pDb != NULL) { - mndTransSetDbName(pTrans, pDb->name); + mndTransSetDbName(pTrans, pDb->name, NULL); } int32_t code = mndTransPrepare(pMnode, pTrans); diff --git a/source/dnode/mnode/sdb/inc/sdb.h b/source/dnode/mnode/sdb/inc/sdb.h index 8536c451b7..1bd09aef63 100644 --- a/source/dnode/mnode/sdb/inc/sdb.h +++ b/source/dnode/mnode/sdb/inc/sdb.h @@ -37,6 +37,8 @@ extern "C" { #define mTrace(...) { if (mDebugFlag & DEBUG_TRACE) { taosPrintLog("MND ", DEBUG_TRACE, mDebugFlag, __VA_ARGS__); }} // clang-format on +#define SDB_WRITE_DELTA 100 + #define SDB_GET_VAL(pData, dataPos, val, pos, func, type) \ { \ if (func(pRaw, dataPos, val) != 0) { \ @@ -169,11 +171,12 @@ typedef struct SSdb { SWal *pWal; char *currDir; char *tmpDir; - int64_t lastCommitVer; - int64_t lastCommitTerm; - int64_t curVer; - int64_t curTerm; - int64_t curConfig; + int64_t commitIndex; + int64_t commitTerm; + int64_t commitConfig; + int64_t applyIndex; + int64_t applyTerm; + int64_t applyConfig; int64_t tableVer[SDB_MAX]; int64_t maxId[SDB_MAX]; EKeyType keyTypes[SDB_MAX]; @@ -257,7 +260,7 @@ int32_t sdbReadFile(SSdb *pSdb); * @param pSdb The sdb object. * @return int32_t 0 for success, -1 for failure. */ -int32_t sdbWriteFile(SSdb *pSdb); +int32_t sdbWriteFile(SSdb *pSdb, int32_t delta); /** * @brief Parse and write raw data to sdb, then free the pRaw object @@ -361,14 +364,8 @@ int64_t sdbGetTableVer(SSdb *pSdb, ESdbType type); * @param index The update value of the apply index. 
* @return int32_t The current index of sdb */ -void sdbSetApplyIndex(SSdb *pSdb, int64_t index); -void sdbSetApplyTerm(SSdb *pSdb, int64_t term); -void sdbSetCurConfig(SSdb *pSdb, int64_t config); -int64_t sdbGetApplyIndex(SSdb *pSdb); -int64_t sdbGetApplyTerm(SSdb *pSdb); -int64_t sdbGetCommitIndex(SSdb *pSdb); -int64_t sdbGetCommitTerm(SSdb *pSdb); -int64_t sdbGetCurConfig(SSdb *pSdb); +void sdbSetApplyInfo(SSdb *pSdb, int64_t index, int64_t term, int64_t config); +void sdbGetCommitInfo(SSdb *pSdb, int64_t *index, int64_t *term, int64_t *config); SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen); void sdbFreeRaw(SSdbRaw *pRaw); diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c index 39e9c75888..c44f1670c3 100644 --- a/source/dnode/mnode/sdb/src/sdb.c +++ b/source/dnode/mnode/sdb/src/sdb.c @@ -53,11 +53,12 @@ SSdb *sdbInit(SSdbOpt *pOption) { } pSdb->pWal = pOption->pWal; - pSdb->curVer = -1; - pSdb->curTerm = -1; - pSdb->lastCommitVer = -1; - pSdb->lastCommitTerm = -1; - pSdb->curConfig = -1; + pSdb->applyIndex = -1; + pSdb->applyTerm = -1; + pSdb->applyConfig = -1; + pSdb->commitIndex = -1; + pSdb->commitTerm = -1; + pSdb->commitConfig = -1; pSdb->pMnode = pOption->pMnode; taosThreadMutexInit(&pSdb->filelock, NULL); mDebug("sdb init successfully"); @@ -67,7 +68,7 @@ SSdb *sdbInit(SSdbOpt *pOption) { void sdbCleanup(SSdb *pSdb) { mDebug("start to cleanup sdb"); - sdbWriteFile(pSdb); + sdbWriteFile(pSdb, 0); if (pSdb->currDir != NULL) { taosMemoryFreeClear(pSdb->currDir); @@ -159,23 +160,20 @@ static int32_t sdbCreateDir(SSdb *pSdb) { return 0; } -void sdbSetApplyIndex(SSdb *pSdb, int64_t index) { pSdb->curVer = index; } - -void sdbSetApplyTerm(SSdb *pSdb, int64_t term) { pSdb->curTerm = term; } - -void sdbSetCurConfig(SSdb *pSdb, int64_t config) { - if (pSdb->curConfig != config) { - mDebug("mnode sync config set from %" PRId64 " to %" PRId64, pSdb->curConfig, config); - pSdb->curConfig = config; - } +void sdbSetApplyInfo(SSdb *pSdb, int64_t index, int64_t term, int64_t config) { + mTrace("mnode apply info changed, from index:%" PRId64 " term:%" PRId64 " config:%" PRId64 ", to index:%" PRId64 + " term:%" PRId64 " config:%" PRId64, + pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig, index, term, config); + pSdb->applyIndex = index; + pSdb->applyTerm = term; + pSdb->applyConfig = config; } -int64_t sdbGetApplyIndex(SSdb *pSdb) { return pSdb->curVer; } - -int64_t sdbGetApplyTerm(SSdb *pSdb) { return pSdb->curTerm; } - -int64_t sdbGetCommitIndex(SSdb *pSdb) { return pSdb->lastCommitVer; } - -int64_t sdbGetCommitTerm(SSdb *pSdb) { return pSdb->lastCommitTerm; } - -int64_t sdbGetCurConfig(SSdb *pSdb) { return pSdb->curConfig; } \ No newline at end of file +void sdbGetCommitInfo(SSdb *pSdb, int64_t *index, int64_t *term, int64_t *config) { + *index = pSdb->commitIndex; + *term = pSdb->commitTerm; + *config = pSdb->commitConfig; + mTrace("mnode current info, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64 ", commit index:%" PRId64 + " term:%" PRId64 " config:%" PRId64, + pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig, *index, *term, *config); +} diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index 34f5d6f23d..0f4e1276c1 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -67,10 +67,12 @@ static void sdbResetData(SSdb *pSdb) { mDebug("sdb:%s is reset", sdbTableName(i)); } - pSdb->curVer = -1; - pSdb->curTerm = -1; - pSdb->lastCommitVer = -1; - 
pSdb->lastCommitTerm = -1; + pSdb->applyIndex = -1; + pSdb->applyTerm = -1; + pSdb->applyConfig = -1; + pSdb->commitIndex = -1; + pSdb->commitTerm = -1; + pSdb->commitConfig = -1; mDebug("sdb reset successfully"); } @@ -90,7 +92,7 @@ static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) { return -1; } - ret = taosReadFile(pFile, &pSdb->curVer, sizeof(int64_t)); + ret = taosReadFile(pFile, &pSdb->applyIndex, sizeof(int64_t)); if (ret < 0) { terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -100,7 +102,7 @@ static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) { return -1; } - ret = taosReadFile(pFile, &pSdb->curTerm, sizeof(int64_t)); + ret = taosReadFile(pFile, &pSdb->applyTerm, sizeof(int64_t)); if (ret < 0) { terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -110,7 +112,7 @@ static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) { return -1; } - ret = taosReadFile(pFile, &pSdb->curConfig, sizeof(int64_t)); + ret = taosReadFile(pFile, &pSdb->applyConfig, sizeof(int64_t)); if (ret < 0) { terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -173,17 +175,17 @@ static int32_t sdbWriteFileHead(SSdb *pSdb, TdFilePtr pFile) { return -1; } - if (taosWriteFile(pFile, &pSdb->curVer, sizeof(int64_t)) != sizeof(int64_t)) { + if (taosWriteFile(pFile, &pSdb->applyIndex, sizeof(int64_t)) != sizeof(int64_t)) { terrno = TAOS_SYSTEM_ERROR(errno); return -1; } - if (taosWriteFile(pFile, &pSdb->curTerm, sizeof(int64_t)) != sizeof(int64_t)) { + if (taosWriteFile(pFile, &pSdb->applyTerm, sizeof(int64_t)) != sizeof(int64_t)) { terrno = TAOS_SYSTEM_ERROR(errno); return -1; } - if (taosWriteFile(pFile, &pSdb->curConfig, sizeof(int64_t)) != sizeof(int64_t)) { + if (taosWriteFile(pFile, &pSdb->applyConfig, sizeof(int64_t)) != sizeof(int64_t)) { terrno = TAOS_SYSTEM_ERROR(errno); return -1; } @@ -300,11 +302,12 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { } code = 0; - pSdb->lastCommitVer = pSdb->curVer; - pSdb->lastCommitTerm = pSdb->curTerm; + pSdb->commitIndex = pSdb->applyIndex; + pSdb->commitTerm = pSdb->applyTerm; + pSdb->commitConfig = pSdb->applyConfig; memcpy(pSdb->tableVer, tableVer, sizeof(tableVer)); - mDebug("read sdb file:%s successfully, index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file, pSdb->lastCommitVer, - pSdb->lastCommitTerm, pSdb->curConfig); + mDebug("read sdb file:%s successfully, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file, + pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig); _OVER: taosCloseFile(&pFile); @@ -336,9 +339,10 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { char curfile[PATH_MAX] = {0}; snprintf(curfile, sizeof(curfile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); - mDebug("start to write sdb file, current ver:%" PRId64 " term:%" PRId64 ", commit ver:%" PRId64 " term:%" PRId64 - " file:%s", - pSdb->curVer, pSdb->curTerm, pSdb->lastCommitVer, pSdb->lastCommitTerm, curfile); + mDebug("start to write sdb file, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64 ", commit index:%" PRId64 + " term:%" PRId64 " config:%" PRId64 ", file:%s", + pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig, pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig, + curfile); TdFilePtr pFile = taosOpenFile(tmpfile, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); if (pFile == NULL) { @@ -430,25 +434,30 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) { if (code != 0) { mError("failed to write sdb file:%s since %s", curfile, tstrerror(code)); } else { - pSdb->lastCommitVer = pSdb->curVer; - pSdb->lastCommitTerm = pSdb->curTerm; - mDebug("write sdb 
file successfully, index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s", - pSdb->lastCommitVer, pSdb->lastCommitTerm, pSdb->curConfig, curfile); + pSdb->commitIndex = pSdb->applyIndex; + pSdb->commitTerm = pSdb->applyTerm; + pSdb->commitConfig = pSdb->applyConfig; + mDebug("write sdb file successfully, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s", + pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig, curfile); } terrno = code; return code; } -int32_t sdbWriteFile(SSdb *pSdb) { +int32_t sdbWriteFile(SSdb *pSdb, int32_t delta) { int32_t code = 0; - if (pSdb->curVer == pSdb->lastCommitVer) { + if (pSdb->applyIndex == pSdb->commitIndex) { + return 0; + } + + if (pSdb->applyIndex - pSdb->commitIndex < delta) { return 0; } taosThreadMutexLock(&pSdb->filelock); if (pSdb->pWal != NULL) { - code = walBeginSnapshot(pSdb->pWal, pSdb->curVer); + code = walBeginSnapshot(pSdb->pWal, pSdb->applyIndex); } if (code == 0) { code = sdbWriteFileImp(pSdb); @@ -470,7 +479,7 @@ int32_t sdbDeploy(SSdb *pSdb) { return -1; } - if (sdbWriteFile(pSdb) != 0) { + if (sdbWriteFile(pSdb, 0) != 0) { return -1; } @@ -522,9 +531,9 @@ int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) { snprintf(datafile, sizeof(datafile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP); taosThreadMutexLock(&pSdb->filelock); - int64_t commitIndex = pSdb->lastCommitVer; - int64_t commitTerm = pSdb->lastCommitTerm; - int64_t curConfig = pSdb->curConfig; + int64_t commitIndex = pSdb->commitIndex; + int64_t commitTerm = pSdb->commitTerm; + int64_t commitConfig = pSdb->commitConfig; if (taosCopyFile(datafile, pIter->name) < 0) { taosThreadMutexUnlock(&pSdb->filelock); terrno = TAOS_SYSTEM_ERROR(errno); @@ -543,8 +552,8 @@ int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter) { } *ppIter = pIter; - mInfo("sdbiter:%p, is created to read snapshot, index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s", pIter, - commitIndex, commitTerm, curConfig, pIter->name); + mInfo("sdbiter:%p, is created to read snapshot, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s", + pIter, commitIndex, commitTerm, commitConfig, pIter->name); return 0; } diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 4f43f99c85..a5ca90e55f 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -662,7 +662,7 @@ int32_t metaFilteTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) { void * tagData = NULL; if (param->val == NULL) { - metaError("vgId:%d failed to filter NULL data", TD_VID(pMeta->pVnode)); + metaError("vgId:%d, failed to filter NULL data", TD_VID(pMeta->pVnode)); return -1; } else { if (IS_VAR_DATA_TYPE(param->type)) { diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index c9d3267adb..70b09ec701 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -124,17 +124,20 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen) { tDecoderClear(&decoder); if (offset.type == TMQ_OFFSET__SNAPSHOT) { - tqDebug("receive offset commit msg to %s, offset(type:snapshot) uid: %ld, ts: %ld", offset.subKey, offset.uid, - offset.ts); + tqDebug("receive offset commit msg to %s on vg %d, offset(type:snapshot) uid: %ld, ts: %ld", offset.subKey, + pTq->pVnode->config.vgId, offset.uid, offset.ts); } else if (offset.type == TMQ_OFFSET__LOG) { - tqDebug("receive offset commit msg to %s, offset(type:log) version: %ld", offset.subKey, offset.version); + 
tqDebug("receive offset commit msg to %s on vg %d, offset(type:log) version: %ld", offset.subKey, + pTq->pVnode->config.vgId, offset.version); } else { ASSERT(0); } - - if (tqOffsetWrite(pTq->pOffsetStore, &offset) < 0) { - ASSERT(0); - return -1; + STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, offset.subKey); + if (pOffset == NULL || pOffset->version < offset.version) { + if (tqOffsetWrite(pTq->pOffsetStore, &offset) < 0) { + ASSERT(0); + return -1; + } } return 0; @@ -149,16 +152,33 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) { int32_t code = 0; // get offset to fetch message - if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__EARLIEAST) { - fetchOffset = walGetFirstVer(pTq->pWal); - } else if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__LATEST) { - fetchOffset = walGetCommittedVer(pTq->pWal); - } else { + if (pReq->currentOffset >= 0) { fetchOffset = pReq->currentOffset + 1; + } else { + STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pReq->subKey); + if (pOffset != NULL) { + ASSERT(pOffset->type == TMQ_OFFSET__LOG); + tqDebug("consumer %ld, restore offset of %s on vg %d, offset(type:log) version: %ld", consumerId, pReq->subKey, + pTq->pVnode->config.vgId, pOffset->version); + fetchOffset = pOffset->version + 1; + } else { + if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__EARLIEAST) { + fetchOffset = walGetFirstVer(pTq->pWal); + } else if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__LATEST) { + fetchOffset = walGetCommittedVer(pTq->pWal); + } else if (pReq->currentOffset == TMQ_CONF__RESET_OFFSET__NONE) { + tqError("tmq poll: no offset committed for consumer %ld in vg %d, subkey %s", consumerId, + pTq->pVnode->config.vgId, pReq->subKey); + terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET; + return -1; + } + tqDebug("consumer %ld, restore offset of %s on vg %d failed, config is %ld, set to %ld", consumerId, pReq->subKey, + pTq->pVnode->config.vgId, pReq->currentOffset, fetchOffset); + } } - tqDebug("tmq poll: consumer %ld (epoch %d) recv poll req in vg %d, req %ld %ld", consumerId, pReq->epoch, - TD_VID(pTq->pVnode), pReq->currentOffset, fetchOffset); + tqDebug("tmq poll: consumer %ld (epoch %d) recv poll req in vg %d, req offset %ld fetch offset %ld", consumerId, + pReq->epoch, TD_VID(pTq->pVnode), pReq->currentOffset, fetchOffset); STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey)); /*ASSERT(pHandle);*/ diff --git a/source/dnode/vnode/src/tq/tqCommit.c b/source/dnode/vnode/src/tq/tqCommit.c index 7b116bff2e..639da22b1c 100644 --- a/source/dnode/vnode/src/tq/tqCommit.c +++ b/source/dnode/vnode/src/tq/tqCommit.c @@ -15,7 +15,4 @@ #include "tq.h" -int tqCommit(STQ* pTq) { - // do nothing - return 0; -} +int tqCommit(STQ* pTq) { return tqOffsetSnapshot(pTq->pOffsetStore); } diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index 8d6cb28065..e5475d7c30 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -92,6 +92,8 @@ STqOffset* tqOffsetRead(STqOffsetStore* pStore, const char* subscribeKey) { } int32_t tqOffsetWrite(STqOffsetStore* pStore, const STqOffset* pOffset) { + ASSERT(pOffset->type == TMQ_OFFSET__LOG); + ASSERT(pOffset->version >= 0); return taosHashPut(pStore->pHash, pOffset->subKey, strlen(pOffset->subKey), pOffset, sizeof(STqOffset)); } @@ -129,7 +131,7 @@ int32_t tqOffsetSnapshot(STqOffsetStore* pStore) { tEncodeSTqOffset(&encoder, pOffset); // write file int64_t writeLen; - if ((writeLen = taosWriteFile(pFile, buf, totLen)) 
!= bodyLen) { + if ((writeLen = taosWriteFile(pFile, buf, totLen)) != totLen) { ASSERT(0); tqError("write offset incomplete, len %d, write len %ld", bodyLen, writeLen); taosHashCancelIterate(pStore->pHash, pIter); diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index fe89321ae9..e98fd8ae1f 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -111,7 +111,7 @@ int32_t tsdbBegin(STsdb *pTsdb) { int32_t tsdbCommit(STsdb *pTsdb) { if (!pTsdb) return 0; - + int32_t code = 0; SCommitH commith = {0}; SDFileSet *pSet = NULL; @@ -495,7 +495,9 @@ static int32_t tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { break; } - if (pIter && pIter->pTable && (!pIdx || (pIter->pTable->suid <= pIdx->suid || pIter->pTable->uid <= pIdx->uid))) { + if (pIter && pIter->pTable && + (!pIdx || ((pIter->pTable->suid < pIdx->suid) || + ((pIter->pTable->suid == pIdx->suid) && (pIter->pTable->uid <= pIdx->uid))))) { if (tsdbCommitToTable(pCommith, mIter) < 0) { tsdbCloseCommitFile(pCommith, true); // revert the file change @@ -503,7 +505,7 @@ static int32_t tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { return -1; } - if (pIdx && (pIter->pTable->uid == pIdx->uid)) { + if (pIdx && ((pIter->pTable->uid == pIdx->uid) && (pIter->pTable->suid == pIdx->suid))) { ++fIter; } ++mIter; @@ -518,6 +520,8 @@ static int32_t tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { return -1; } ++fIter; + } else { + ASSERT(0); } } diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index f46114746e..ea3494594e 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -61,6 +61,7 @@ static int32_t vnodeSetStandBy(SVnode *pVnode) { return -1; } + vInfo("vgId:%d, start to transfer leader", TD_VID(pVnode)); if (syncLeaderTransfer(pVnode->sync) != 0) { vError("vgId:%d, failed to transfer leader since:%s", TD_VID(pVnode), terrstr()); return -1; @@ -179,8 +180,8 @@ void vnodeApplyMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) { for (int32_t i = 0; i < numOfMsgs; ++i) { if (taosGetQitem(qall, (void **)&pMsg) == 0) continue; - vTrace("vgId:%d, msg:%p get from vnode-apply queue, type:%s handle:%p", vgId, pMsg, TMSG_INFO(pMsg->msgType), - pMsg->info.handle); + vTrace("vgId:%d, msg:%p get from vnode-apply queue, index:%" PRId64 " type:%s handle:%p", vgId, pMsg, + pMsg->info.conn.applyIndex, TMSG_INFO(pMsg->msgType), pMsg->info.handle); SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info}; if (rsp.code == 0) { @@ -297,7 +298,7 @@ int32_t vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) { ret = -1; } - if (ret != 0) { + if (ret != 0 && terrno == 0) { terrno = TSDB_CODE_SYN_INTERNAL_ERROR; } return ret; @@ -333,8 +334,8 @@ static void vnodeSyncReconfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReCon syncGetAndDelRespRpc(pVnode->sync, cbMeta.seqNum, &rpcMsg.info); rpcMsg.info.conn.applyIndex = cbMeta.index; - vInfo("vgId:%d, alter vnode replica is confirmed, type:%s contLen:%d seq:%" PRIu64 " handle:%p", TD_VID(pVnode), - TMSG_INFO(pMsg->msgType), pMsg->contLen, cbMeta.seqNum, rpcMsg.info.handle); + vInfo("vgId:%d, alter vnode replica is confirmed, type:%s contLen:%d seq:%" PRIu64 " index:%" PRId64 " handle:%p", + TD_VID(pVnode), TMSG_INFO(pMsg->msgType), pMsg->contLen, cbMeta.seqNum, cbMeta.index, rpcMsg.info.handle); if (rpcMsg.info.handle != NULL) { tmsgSendRsp(&rpcMsg); } diff --git 
a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index df4b9b6ea6..7c90c3538c 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -59,6 +59,7 @@ enum { CTG_OP_UPDATE_VG_EPSET, CTG_OP_UPDATE_TB_INDEX, CTG_OP_DROP_TB_INDEX, + CTG_OP_CLEAR_CACHE, CTG_OP_MAX }; @@ -328,6 +329,10 @@ typedef struct SCtgDropTbIndexMsg { char tbName[TSDB_TABLE_NAME_LEN]; } SCtgDropTbIndexMsg; +typedef struct SCtgClearCacheMsg { + SCatalog* pCtg; +} SCtgClearCacheMsg; + typedef struct SCtgUpdateEpsetMsg { SCatalog* pCtg; char dbFName[TSDB_DB_FNAME_LEN]; @@ -471,8 +476,8 @@ typedef struct SCtgOperation { #define CTG_API_LEAVE(c) do { int32_t __code = c; CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); CTG_API_DEBUG("CTG API leave %s", __FUNCTION__); CTG_RET(__code); } while (0) #define CTG_API_ENTER() do { CTG_API_DEBUG("CTG API enter %s", __FUNCTION__); CTG_LOCK(CTG_READ, &gCtgMgmt.lock); if (atomic_load_8((int8_t*)&gCtgMgmt.exit)) { CTG_API_LEAVE(TSDB_CODE_CTG_OUT_OF_SERVICE); } } while (0) -void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p); -void ctgdShowClusterCache(SCatalog* pCtg); +void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p); +void ctgdShowClusterCache(SCatalog* pCtg); int32_t ctgdShowCacheInfo(void); int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq); @@ -487,8 +492,8 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *action); int32_t ctgOpUpdateUser(SCtgCacheOperation *action); int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation); int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache); -void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache); -void ctgRUnlockVgInfo(SCtgDBCache *dbCache); +void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache); +void ctgRUnlockVgInfo(SCtgDBCache *dbCache); int32_t ctgTbMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32_t *exist); int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta); int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid, char *stbName); @@ -502,17 +507,20 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncReq); int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet); int32_t ctgUpdateTbIndexEnqueue(SCatalog* pCtg, STableIndex **pIndex, bool syncOp); +int32_t ctgClearCacheEnqueue(SCatalog* pCtg, bool syncOp); int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type); int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size); int32_t ctgMetaRentGet(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size); int32_t ctgUpdateTbMetaToCache(SCatalog* pCtg, STableMetaOutput* pOut, bool syncReq); int32_t ctgStartUpdateThread(); int32_t ctgRelaunchGetTbMetaTask(SCtgTask *pTask); -void ctgReleaseVgInfoToCache(SCatalog* pCtg, SCtgDBCache *dbCache); +void ctgReleaseVgInfoToCache(SCatalog* pCtg, SCtgDBCache *dbCache); int32_t ctgReadTbIndexFromCache(SCatalog* pCtg, SName* pTableName, SArray** pRes); int32_t ctgDropTbIndexEnqueue(SCatalog* pCtg, SName* pName, bool syncOp); int32_t ctgOpDropTbIndex(SCtgCacheOperation *operation); int32_t ctgOpUpdateTbIndex(SCtgCacheOperation *operation); +int32_t ctgOpClearCache(SCtgCacheOperation *operation); + @@ -535,22 +543,22 @@ 
int32_t ctgMakeAsyncRes(SCtgJob *pJob); int32_t ctgCloneVgInfo(SDBVgInfo *src, SDBVgInfo **dst); int32_t ctgCloneMetaOutput(STableMetaOutput *output, STableMetaOutput **pOutput); int32_t ctgGenerateVgList(SCatalog *pCtg, SHashObj *vgHash, SArray** pList); -void ctgFreeJob(void* job); -void ctgFreeHandle(SCatalog* pCtg); -void ctgFreeVgInfo(SDBVgInfo *vgInfo); +void ctgFreeJob(void* job); +void ctgFreeHandle(SCatalog* pCtg); +void ctgFreeVgInfo(SDBVgInfo *vgInfo); int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName *pTableName, SVgroupInfo *pVgroup); -void ctgResetTbMetaTask(SCtgTask* pTask); -void ctgFreeDbCache(SCtgDBCache *dbCache); +void ctgResetTbMetaTask(SCtgTask* pTask); +void ctgFreeDbCache(SCtgDBCache *dbCache); int32_t ctgStbVersionSortCompare(const void* key1, const void* key2); int32_t ctgDbVgVersionSortCompare(const void* key1, const void* key2); int32_t ctgStbVersionSearchCompare(const void* key1, const void* key2); int32_t ctgDbVgVersionSearchCompare(const void* key1, const void* key2); -void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput); +void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput); int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target); -char *ctgTaskTypeStr(CTG_TASK_TYPE type); +char * ctgTaskTypeStr(CTG_TASK_TYPE type); int32_t ctgUpdateSendTargetInfo(SMsgSendInfo *pMsgSendInfo, int32_t msgType, SCtgTask* pTask); int32_t ctgCloneTableIndex(SArray* pIndex, SArray** pRes); -void ctgFreeSTableIndex(void *info); +void ctgFreeSTableIndex(void *info); extern SCatalogMgmt gCtgMgmt; diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index a3576f8738..bb02895569 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -105,7 +105,7 @@ int32_t ctgRefreshDBVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const char* code = ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, &DbOut, NULL); if (code) { if (CTG_DB_NOT_EXIST(code) && (NULL != dbCache)) { - ctgDebug("db no longer exist, dbFName:%s, dbId:%" PRIx64, input.db, input.dbId); + ctgDebug("db no longer exist, dbFName:%s, dbId:0x%" PRIx64, input.db, input.dbId); ctgDropDbCacheEnqueue(pCtg, input.db, input.dbId); } @@ -571,7 +571,7 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) { } if (NULL == gCtgMgmt.pCluster) { - qError("catalog cluster cache are not ready, clusterId:%" PRIx64, clusterId); + qError("catalog cluster cache are not ready, clusterId:0x%" PRIx64, clusterId); CTG_ERR_RET(TSDB_CODE_CTG_NOT_READY); } @@ -583,7 +583,7 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) { if (ctg && (*ctg)) { *catalogHandle = *ctg; - qDebug("got catalog handle from cache, clusterId:%" PRIx64 ", CTG:%p", clusterId, *ctg); + qDebug("got catalog handle from cache, clusterId:0x%" PRIx64 ", CTG:%p", clusterId, *ctg); return TSDB_CODE_SUCCESS; } @@ -612,11 +612,11 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) { continue; } - qError("taosHashPut CTG to cache failed, clusterId:%" PRIx64, clusterId); + qError("taosHashPut CTG to cache failed, clusterId:0x%" PRIx64, clusterId); CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); } - qDebug("add CTG to cache, clusterId:%" PRIx64 ", CTG:%p", clusterId, clusterCtg); + qDebug("add CTG to cache, clusterId:0x%" PRIx64 ", CTG:%p", clusterId, clusterCtg); break; } @@ -640,7 +640,7 @@ void catalogFreeHandle(SCatalog* pCtg) { } if (taosHashRemove(gCtgMgmt.pCluster, &pCtg->clusterId, sizeof(pCtg->clusterId))) { 
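
Aside: the catalog logging edits in this file standardize on an explicit `0x` prefix for hex-printed identifiers, so a cluster, db, or suid value cannot be misread as decimal. A two-line illustration:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  uint64_t clusterId = 0x12ab34cd56ef78ULL;
  /* without the prefix, "12ab34cd56ef78" is easy to misread as a decimal id */
  printf("clusterId:%" PRIx64 "\n", clusterId);
  printf("clusterId:0x%" PRIx64 "\n", clusterId); /* the convention the diff adopts */
  return 0;
}
```
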
- ctgWarn("taosHashRemove from cluster failed, may already be freed, clusterId:%" PRIx64, pCtg->clusterId); + ctgWarn("taosHashRemove from cluster failed, may already be freed, clusterId:0x%" PRIx64, pCtg->clusterId); return; } @@ -650,7 +650,7 @@ void catalogFreeHandle(SCatalog* pCtg) { ctgFreeHandle(pCtg); - ctgInfo("handle freed, culsterId:%" PRIx64, clusterId); + ctgInfo("handle freed, culsterId:0x%" PRIx64, clusterId); } int32_t catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* version, int64_t* dbId, int32_t* tableNum) { @@ -1247,6 +1247,23 @@ int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth) { CTG_API_LEAVE(ctgUpdateUserEnqueue(pCtg, pAuth, false)); } +int32_t catalogClearCache(void) { + CTG_API_ENTER(); + + qInfo("start to clear catalog cache"); + + if (NULL == gCtgMgmt.pCluster || atomic_load_8((int8_t*)&gCtgMgmt.exit)) { + CTG_API_LEAVE(TSDB_CODE_SUCCESS); + } + + int32_t code = ctgClearCacheEnqueue(NULL, true); + + qInfo("clear catalog cache end, code: %s", tstrerror(code)); + + CTG_API_LEAVE(code); +} + + void catalogDestroy(void) { qInfo("start to destroy catalog"); diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 8dabd56934..6adadf5045 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -622,7 +622,7 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) { SCtgJob* pJob = pTask->pJob; int32_t code = 0; - qDebug("QID:0x%" PRIx64 " task %d end with rsp %s", pJob->queryId, pTask->taskId, tstrerror(rspCode)); + qDebug("QID:0x%" PRIx64 " task %d end with res %s", pJob->queryId, pTask->taskId, tstrerror(rspCode)); pTask->code = rspCode; @@ -1276,7 +1276,7 @@ int32_t ctgLaunchJob(SCtgJob *pJob) { for (int32_t i = 0; i < taskNum; ++i) { SCtgTask *pTask = taosArrayGet(pJob->pTasks, i); - qDebug("QID:0x%" PRIx64 " start to launch task %d", pJob->queryId, pTask->taskId); + qDebug("QID:0x%" PRIx64 " ctg start to launch task %d", pJob->queryId, pTask->taskId); CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask)); } diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index 0948c01270..1de5ee3d7d 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -69,6 +69,11 @@ SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = { CTG_OP_DROP_TB_INDEX, "drop tbIndex", ctgOpDropTbIndex + }, + { + CTG_OP_CLEAR_CACHE, + "clear cache", + ctgOpClearCache } }; @@ -81,7 +86,7 @@ int32_t ctgRLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) { if (dbCache->deleted) { CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock); - ctgDebug("db is dropping, dbId:%"PRIx64, dbCache->dbId); + ctgDebug("db is dropping, dbId:0x%"PRIx64, dbCache->dbId); *inCache = false; return TSDB_CODE_SUCCESS; @@ -92,7 +97,7 @@ int32_t ctgRLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) { CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock); *inCache = false; - ctgDebug("db vgInfo is empty, dbId:%"PRIx64, dbCache->dbId); + ctgDebug("db vgInfo is empty, dbId:0x%"PRIx64, dbCache->dbId); return TSDB_CODE_SUCCESS; } @@ -105,7 +110,7 @@ int32_t ctgWLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache) { CTG_LOCK(CTG_WRITE, &dbCache->vgCache.vgLock); if (dbCache->deleted) { - ctgDebug("db is dropping, dbId:%"PRIx64, dbCache->dbId); + ctgDebug("db is dropping, dbId:0x%"PRIx64, dbCache->dbId); CTG_UNLOCK(CTG_WRITE, &dbCache->vgCache.vgLock); CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED); } @@ -280,27 +285,27 @@ int32_t 
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index 8dabd56934..6adadf5045 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -622,7 +622,7 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) {
   SCtgJob* pJob = pTask->pJob;
   int32_t code = 0;
 
-  qDebug("QID:0x%" PRIx64 " task %d end with rsp %s", pJob->queryId, pTask->taskId, tstrerror(rspCode));
+  qDebug("QID:0x%" PRIx64 " task %d end with res %s", pJob->queryId, pTask->taskId, tstrerror(rspCode));
 
   pTask->code = rspCode;
 
@@ -1276,7 +1276,7 @@ int32_t ctgLaunchJob(SCtgJob *pJob) {
   for (int32_t i = 0; i < taskNum; ++i) {
     SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
 
-    qDebug("QID:0x%" PRIx64 " start to launch task %d", pJob->queryId, pTask->taskId);
+    qDebug("QID:0x%" PRIx64 " ctg start to launch task %d", pJob->queryId, pTask->taskId);
     CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask));
   }
 
diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c
index 0948c01270..1de5ee3d7d 100644
--- a/source/libs/catalog/src/ctgCache.c
+++ b/source/libs/catalog/src/ctgCache.c
@@ -69,6 +69,11 @@ SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = {
     CTG_OP_DROP_TB_INDEX,
     "drop tbIndex",
     ctgOpDropTbIndex
+  },
+  {
+    CTG_OP_CLEAR_CACHE,
+    "clear cache",
+    ctgOpClearCache
   }
 };
 
@@ -81,7 +86,7 @@ int32_t ctgRLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) {
   if (dbCache->deleted) {
     CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock);
 
-    ctgDebug("db is dropping, dbId:%"PRIx64, dbCache->dbId);
+    ctgDebug("db is dropping, dbId:0x%"PRIx64, dbCache->dbId);
 
     *inCache = false;
     return TSDB_CODE_SUCCESS;
@@ -92,7 +97,7 @@ int32_t ctgRLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) {
     CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock);
 
     *inCache = false;
-    ctgDebug("db vgInfo is empty, dbId:%"PRIx64, dbCache->dbId);
+    ctgDebug("db vgInfo is empty, dbId:0x%"PRIx64, dbCache->dbId);
     return TSDB_CODE_SUCCESS;
   }
 
@@ -105,7 +110,7 @@ int32_t ctgWLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache) {
   CTG_LOCK(CTG_WRITE, &dbCache->vgCache.vgLock);
 
   if (dbCache->deleted) {
-    ctgDebug("db is dropping, dbId:%"PRIx64, dbCache->dbId);
+    ctgDebug("db is dropping, dbId:0x%"PRIx64, dbCache->dbId);
     CTG_UNLOCK(CTG_WRITE, &dbCache->vgCache.vgLock);
     CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
   }
@@ -280,27 +285,27 @@ int32_t ctgAcquireStbMetaFromCache(SCatalog* pCtg, char *dbFName, uint64_t suid,
   int32_t sz = 0;
   char* stName = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid));
   if (NULL == stName) {
-    ctgDebug("stb %" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
+    ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
     goto _return;
   }
 
   pCache = taosHashAcquire(dbCache->tbCache, stName, strlen(stName));
   if (NULL == pCache) {
-    ctgDebug("stb %" PRIx64 " name %s not in cache, dbFName:%s", suid, stName, dbFName);
+    ctgDebug("stb 0x%" PRIx64 " name %s not in cache, dbFName:%s", suid, stName, dbFName);
     taosHashRelease(dbCache->stbCache, stName);
     goto _return;
   }
 
   CTG_LOCK(CTG_READ, &pCache->metaLock);
   if (NULL == pCache->pMeta) {
-    ctgDebug("stb %" PRIx64 " meta not in cache, dbFName:%s", suid, dbFName);
+    ctgDebug("stb 0x%" PRIx64 " meta not in cache, dbFName:%s", suid, dbFName);
     goto _return;
   }
 
   *pDb = dbCache;
   *pTb = pCache;
 
-  ctgDebug("stb %" PRIx64 " meta got in cache, dbFName:%s", suid, dbFName);
+  ctgDebug("stb 0x%" PRIx64 " meta got in cache, dbFName:%s", suid, dbFName);
 
   CTG_CACHE_STAT_INC(tbMetaHitNum, 1);
 
@@ -434,14 +439,14 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
   if (NULL == tbCache) {
     ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
     taosMemoryFreeClear(*pTableMeta);
-    ctgDebug("stb %" PRIx64 " meta not in cache", ctx->tbInfo.suid);
+    ctgDebug("stb 0x%" PRIx64 " meta not in cache", ctx->tbInfo.suid);
     return TSDB_CODE_SUCCESS;
   }
 
   STableMeta* stbMeta = tbCache->pMeta;
   if (stbMeta->suid != ctx->tbInfo.suid) {
     ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
-    ctgError("stb suid %" PRIx64 " in stbCache mis-match, expected suid:%"PRIx64 , stbMeta->suid, ctx->tbInfo.suid);
+    ctgError("stb suid 0x%" PRIx64 " in stbCache mismatch, expected suid 0x%"PRIx64, stbMeta->suid, ctx->tbInfo.suid);
     CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
   }
 
@@ -492,7 +497,7 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver,
   *sver = tbMeta->sversion;
   *tver = tbMeta->tversion;
 
-  ctgDebug("Got tb %s ver from cache, dbFName:%s, tbType:%d, sver:%d, tver:%d, suid:%" PRIx64,
+  ctgDebug("Got tb %s ver from cache, dbFName:%s, tbType:%d, sver:%d, tver:%d, suid:0x%" PRIx64,
            pTableName->tname, dbFName, *tbType, *sver, *tver, *suid);
 
   ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
@@ -507,14 +512,14 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver,
   ctgAcquireStbMetaFromCache(pCtg, dbFName, *suid, &dbCache, &tbCache);
   if (NULL == tbCache) {
     ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
-    ctgDebug("stb %" PRIx64 " meta not in cache", *suid);
+    ctgDebug("stb 0x%" PRIx64 " meta not in cache", *suid);
     return TSDB_CODE_SUCCESS;
   }
 
   STableMeta* stbMeta = tbCache->pMeta;
   if (stbMeta->suid != *suid) {
     ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
-    ctgError("stb suid %" PRIx64 " in stbCache mis-match, expected suid:%" PRIx64 , stbMeta->suid, *suid);
+    ctgError("stb suid 0x%" PRIx64 " in stbCache mismatch, expected suid:0x%" PRIx64, stbMeta->suid, *suid);
     CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
   }
 
@@ -990,6 +995,34 @@ _return:
 }
 
+int32_t ctgClearCacheEnqueue(SCatalog* pCtg, bool syncOp) {
+  int32_t code = 0;
+  SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
+  op->opId = CTG_OP_CLEAR_CACHE;
+  op->syncOp = syncOp;
+
+  SCtgClearCacheMsg *msg = taosMemoryMalloc(sizeof(SCtgClearCacheMsg));
+  if (NULL == msg) {
+    ctgError("malloc %d failed", (int32_t)sizeof(SCtgClearCacheMsg));
+    taosMemoryFreeClear(op);
+    CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
+  }
+
+  msg->pCtg = pCtg;
+  op->data = msg;
+
+  CTG_ERR_JRET(ctgEnqueue(pCtg, op));
+
+  return TSDB_CODE_SUCCESS;
+
+_return:
+
+  taosMemoryFreeClear(msg);
+
+  CTG_RET(code);
+}
+
+
 int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type) {
   mgmt->slotRIdx = 0;
   mgmt->slotNum = rentSec / CTG_RENT_SLOT_SECOND;
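// How an enqueued operation is consumed, as a sketch: the cache updater
// thread looks the opId up in gCtgCacheOperation and invokes the bound
// handler, so registering CTG_OP_CLEAR_CACHE in the table above is all that
// is needed to route ctgClearCacheEnqueue() to ctgOpClearCache(). The
// SCtgOperation field names (opId/name/func) are assumptions based on the
// initializer order in that table, not taken from the header:
static int32_t ctgProcessOneOperation(SCtgCacheOperation *op) {
  SCtgOperation *opInfo = &gCtgCacheOperation[op->opId];  // e.g. CTG_OP_CLEAR_CACHE
  qDebug("process [%s] operation", opInfo->name);
  return (*opInfo->func)(op);                             // ctgOpClearCache for the new opId
}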
+ qError("meta not found in slot, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type); CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); } taosArrayRemove(slot->meta, idx); - qDebug("meta in rent removed, id:%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type); + qDebug("meta in rent removed, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type); _return: @@ -1219,11 +1251,11 @@ int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) { SDbVgVersion vgVersion = {.dbId = newDBCache.dbId, .vgVersion = -1}; strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName)); - ctgDebug("db added to cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbId); + ctgDebug("db added to cache, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbId); CTG_ERR_RET(ctgMetaRentAdd(&pCtg->dbRent, &vgVersion, dbId, sizeof(SDbVgVersion))); - ctgDebug("db added to rent, dbFName:%s, vgVersion:%d, dbId:%"PRIx64, dbFName, vgVersion.vgVersion, dbId); + ctgDebug("db added to rent, dbFName:%s, vgVersion:%d, dbId:0x%"PRIx64, dbFName, vgVersion.vgVersion, dbId); return TSDB_CODE_SUCCESS; @@ -1246,7 +1278,7 @@ void ctgRemoveStbRent(SCatalog* pCtg, SCtgDBCache *dbCache) { suid = taosHashGetKey(pIter, NULL); if (TSDB_CODE_SUCCESS == ctgMetaRentRemove(&pCtg->stbRent, *suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare)) { - ctgDebug("stb removed from rent, suid:%"PRIx64, *suid); + ctgDebug("stb removed from rent, suid:0x%"PRIx64, *suid); } pIter = taosHashIterate(dbCache->stbCache, pIter); @@ -1257,7 +1289,7 @@ void ctgRemoveStbRent(SCatalog* pCtg, SCtgDBCache *dbCache) { int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* dbFName) { uint64_t dbId = dbCache->dbId; - ctgInfo("start to remove db from cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbCache->dbId); + ctgInfo("start to remove db from cache, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbCache->dbId); CTG_LOCK(CTG_WRITE, &dbCache->dbLock); @@ -1268,7 +1300,7 @@ int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* d CTG_UNLOCK(CTG_WRITE, &dbCache->dbLock); CTG_ERR_RET(ctgMetaRentRemove(&pCtg->dbRent, dbId, ctgDbVgVersionSortCompare, ctgDbVgVersionSearchCompare)); - ctgDebug("db removed from rent, dbFName:%s, dbId:%"PRIx64, dbFName, dbId); + ctgDebug("db removed from rent, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbId); if (taosHashRemove(pCtg->dbCache, dbFName, strlen(dbFName))) { ctgInfo("taosHashRemove from dbCache failed, may be removed, dbFName:%s", dbFName); @@ -1276,7 +1308,7 @@ int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* d } CTG_CACHE_STAT_DEC(dbNum, 1); - ctgInfo("db removed from cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbId); + ctgInfo("db removed from cache, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbId); return TSDB_CODE_SUCCESS; } @@ -1339,7 +1371,7 @@ int32_t ctgUpdateRentStbVersion(SCatalog *pCtg, char* dbFName, char* tbName, uin CTG_ERR_RET(ctgMetaRentUpdate(&pCtg->stbRent, &metaRent, metaRent.suid, sizeof(SSTableVersion), ctgStbVersionSortCompare, ctgStbVersionSearchCompare)); - ctgDebug("db %s,%" PRIx64 " stb %s,%" PRIx64 " sver %d tver %d smaVer %d updated to stbRent", + ctgDebug("db %s,0x%" PRIx64 " stb %s,0x%" PRIx64 " sver %d tver %d smaVer %d updated to stbRent", dbFName, dbId, tbName, suid, metaRent.sversion, metaRent.tversion, metaRent.smaVer); return TSDB_CODE_SUCCESS; @@ -1349,7 +1381,7 @@ int32_t ctgUpdateRentStbVersion(SCatalog *pCtg, char* dbFName, char* tbName, uin int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char 
*dbFName, uint64_t dbId, char *tbName, STableMeta *meta, int32_t metaSize) { if (NULL == dbCache->tbCache || NULL == dbCache->stbCache) { taosMemoryFree(meta); - ctgError("db is dropping, dbId:%"PRIx64, dbCache->dbId); + ctgError("db is dropping, dbId:0x%"PRIx64, dbCache->dbId); CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED); } @@ -1370,10 +1402,10 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam if (origType == TSDB_SUPER_TABLE) { if (taosHashRemove(dbCache->stbCache, &orig->suid, sizeof(orig->suid))) { - ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:%"PRIx64, dbFName, tbName, orig->suid); + ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:0x%"PRIx64, dbFName, tbName, orig->suid); } else { CTG_CACHE_STAT_DEC(stblNum, 1); - ctgDebug("stb removed from stbCache, dbFName:%s, stb:%s, suid:%"PRIx64, dbFName, tbName, orig->suid); + ctgDebug("stb removed from stbCache, dbFName:%s, stb:%s, suid:0x%"PRIx64, dbFName, tbName, orig->suid); } origSuid = orig->suid; @@ -1407,13 +1439,13 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam } if (origSuid != meta->suid && taosHashPut(dbCache->stbCache, &meta->suid, sizeof(meta->suid), tbName, strlen(tbName) + 1) != 0) { - ctgError("taosHashPut to stable cache failed, suid:%"PRIx64, meta->suid); + ctgError("taosHashPut to stable cache failed, suid:0x%"PRIx64, meta->suid); CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR); } CTG_CACHE_STAT_INC(stblNum, 1); - ctgDebug("stb %" PRIx64 " updated to cache, dbFName:%s, tbName:%s, tbType:%d", meta->suid, dbFName, tbName, meta->tableType); + ctgDebug("stb 0x%" PRIx64 " updated to cache, dbFName:%s, tbName:%s, tbType:%d", meta->suid, dbFName, tbName, meta->tableType); CTG_ERR_RET(ctgUpdateRentStbVersion(pCtg, dbFName, tbName, dbId, meta->suid, pCache)); @@ -1424,7 +1456,7 @@ int32_t ctgWriteTbIndexToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char* dbFNa if (NULL == dbCache->tbCache) { ctgFreeSTableIndex(*index); taosMemoryFreeClear(*index); - ctgError("db is dropping, dbId:%"PRIx64, dbCache->dbId); + ctgError("db is dropping, dbId:0x%"PRIx64, dbCache->dbId); CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED); } @@ -1510,7 +1542,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) { SCtgDBCache *dbCache = NULL; CTG_ERR_RET(ctgGetAddDBCache(msg->pCtg, dbFName, msg->dbId, &dbCache)); if (NULL == dbCache) { - ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:%"PRIx64, dbFName, msg->dbId); + ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%"PRIx64, dbFName, msg->dbId); CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); } @@ -1540,7 +1572,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) { vgCache->vgInfo = dbInfo; msg->dbInfo = NULL; - ctgDebug("db vgInfo updated, dbFName:%s, vgVer:%d, dbId:%"PRIx64, dbFName, vgVersion.vgVersion, vgVersion.dbId); + ctgDebug("db vgInfo updated, dbFName:%s, vgVer:%d, dbId:0x%"PRIx64, dbFName, vgVersion.vgVersion, vgVersion.dbId); ctgWUnlockVgInfo(dbCache); @@ -1569,7 +1601,7 @@ int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) { } if (dbCache->dbId != msg->dbId) { - ctgInfo("dbId already updated, dbFName:%s, dbId:%"PRIx64 ", targetId:%"PRIx64, msg->dbFName, dbCache->dbId, msg->dbId); + ctgInfo("dbId already updated, dbFName:%s, dbId:0x%"PRIx64 ", targetId:0x%"PRIx64, msg->dbFName, dbCache->dbId, msg->dbId); goto _return; } @@ -1629,7 +1661,7 @@ int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) { CTG_ERR_JRET(ctgGetAddDBCache(pCtg, pMeta->dbFName, pMeta->dbId, 
&dbCache)); if (NULL == dbCache) { - ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:%" PRIx64, pMeta->dbFName, pMeta->dbId); + ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%" PRIx64, pMeta->dbFName, pMeta->dbId); CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); } @@ -1673,27 +1705,28 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) { } if (msg->dbId && (dbCache->dbId != msg->dbId)) { - ctgDebug("dbId already modified, dbFName:%s, current:%"PRIx64", dbId:%"PRIx64", stb:%s, suid:%"PRIx64, msg->dbFName, dbCache->dbId, msg->dbId, msg->stbName, msg->suid); + ctgDebug("dbId already modified, dbFName:%s, current:0x%"PRIx64", dbId:0x%"PRIx64", stb:%s, suid:0x%"PRIx64, + msg->dbFName, dbCache->dbId, msg->dbId, msg->stbName, msg->suid); return TSDB_CODE_SUCCESS; } if (taosHashRemove(dbCache->stbCache, &msg->suid, sizeof(msg->suid))) { - ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid); + ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid); } else { CTG_CACHE_STAT_DEC(stblNum, 1); } if (taosHashRemove(dbCache->tbCache, msg->stbName, strlen(msg->stbName))) { - ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid); + ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid); } else { CTG_CACHE_STAT_DEC(tblNum, 1); } - ctgInfo("stb removed from cache, dbFName:%s, stbName:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid); + ctgInfo("stb removed from cache, dbFName:%s, stbName:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid); CTG_ERR_JRET(ctgMetaRentRemove(&msg->pCtg->stbRent, msg->suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare)); - ctgDebug("stb removed from rent, dbFName:%s, stbName:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid); + ctgDebug("stb removed from rent, dbFName:%s, stbName:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid); _return: @@ -1714,7 +1747,7 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) { } if (dbCache->dbId != msg->dbId) { - ctgDebug("dbId %" PRIx64 " not match with curId %"PRIx64", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId, msg->dbFName, msg->tbName); + ctgDebug("dbId 0x%" PRIx64 " not match with curId 0x%"PRIx64", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId, msg->dbFName, msg->tbName); return TSDB_CODE_SUCCESS; } @@ -1898,6 +1931,37 @@ _return: } +int32_t ctgOpClearCache(SCtgCacheOperation *operation) { + int32_t code = 0; + SCtgClearCacheMsg *msg = operation->data; + SCatalog* pCtg = msg->pCtg; + + if (pCtg) { + catalogFreeHandle(pCtg); + goto _return; + } + + void* pIter = taosHashIterate(gCtgMgmt.pCluster, NULL); + while (pIter) { + pCtg = *(SCatalog**)pIter; + + if (pCtg) { + catalogFreeHandle(pCtg); + } + + pIter = taosHashIterate(gCtgMgmt.pCluster, pIter); + } + + taosHashClear(gCtgMgmt.pCluster); + +_return: + + taosMemoryFreeClear(msg); + + CTG_RET(code); +} + + void ctgUpdateThreadUnexpectedStopped(void) { if (!atomic_load_8((int8_t*)&gCtgMgmt.exit) && CTG_IS_LOCKED(&gCtgMgmt.lock) > 0) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); } @@ -1971,11 +2035,10 @@ void* ctgUpdateThreadFunc(void* param) { CTG_RT_STAT_INC(qDoneNum, 1); + ctgdShowCacheInfo(); ctgdShowClusterCache(pCtg); } - if (CTG_IS_LOCKED(&gCtgMgmt.lock)) CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); - qInfo("catalog update thread stopped"); return 
diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c
index ff93bedb21..7f2b919f17 100644
--- a/source/libs/catalog/src/ctgDbg.c
+++ b/source/libs/catalog/src/ctgDbg.c
@@ -19,7 +19,7 @@
 #include "catalogInt.h"
 
 extern SCatalogMgmt gCtgMgmt;
-SCtgDebug gCTGDebug = {.lockEnable = true, .apiEnable = true};
+SCtgDebug gCTGDebug = {.cacheEnable = true};
 
 void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
   ASSERT(*(int32_t*)param == 1);
@@ -40,9 +40,9 @@ void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
       STableComInfo *c = &p->tableInfo;
 
       if (TSDB_CHILD_TABLE == p->tableType) {
-        qDebug("table meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64, p->tableType, p->vgId, p->uid, p->suid);
+        qDebug("table meta: type:%d, vgId:%d, uid:0x%" PRIx64 ",suid:0x%" PRIx64, p->tableType, p->vgId, p->uid, p->suid);
       } else {
-        qDebug("table meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
+        qDebug("table meta: type:%d, vgId:%d, uid:0x%" PRIx64 ",suid:0x%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
                p->tableType, p->vgId, p->uid, p->suid, p->sversion, p->tversion, c->numOfTags, c->precision, c->numOfColumns, c->rowSize);
       }
 
@@ -75,7 +75,7 @@ void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) {
     num = taosArrayGetSize(pResult->pDbInfo);
     for (int32_t i = 0; i < num; ++i) {
       SDbInfo *pDb = taosArrayGet(pResult->pDbInfo, i);
-      qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId);
+      qDebug("db %d dbInfo: vgVer:%d, tbNum:%d, dbId:0x%" PRIx64, i, pDb->vgVer, pDb->tbNum, pDb->dbId);
     }
   } else {
     qDebug("empty db info");
@@ -333,10 +333,10 @@ void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p) {
   STableComInfo *c = &p->tableInfo;
 
   if (TSDB_CHILD_TABLE == p->tableType) {
-    ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64, tbName, p->tableType, p->vgId, p->uid, p->suid);
+    ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:0x%" PRIx64 ",suid:0x%" PRIx64, tbName, p->tableType, p->vgId, p->uid, p->suid);
     return;
   } else {
-    ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
+    ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:0x%" PRIx64 ",suid:0x%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
              tbName, p->tableType, p->vgId, p->uid, p->suid, p->sversion, p->tversion, c->numOfTags, c->precision, c->numOfColumns, c->rowSize);
   }
 
@@ -377,7 +377,7 @@ void ctgdShowDBCache(SCatalog* pCtg, SHashObj *dbHash) {
       }
     }
 
-    ctgDebug("[%d] db [%.*s][%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, vgNum:%d",
+    ctgDebug("[%d] db [%.*s][0x%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, vgNum:%d",
              i, (int32_t)len, dbFName, dbCache->dbId, dbCache->deleted?"deleted":"", metaNum, stbNum, vgVersion, hashMethod, vgNum);
 
     pIter = taosHashIterate(dbHash, pIter);
@@ -392,13 +392,13 @@ void ctgdShowClusterCache(SCatalog* pCtg) {
     return;
   }
 
-  ctgDebug("## cluster %"PRIx64" %p cache Info BEGIN ##", pCtg->clusterId, pCtg);
+  ctgDebug("## cluster 0x%"PRIx64" %p cache Info BEGIN ##", pCtg->clusterId, pCtg);
   ctgDebug("db:%d meta:%d stb:%d dbRent:%d stbRent:%d", ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM),
            ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_RENT_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM));
 
   ctgdShowDBCache(pCtg, pCtg->dbCache);
 
-  ctgDebug("## cluster %"PRIx64" %p cache Info END ##", pCtg->clusterId, pCtg);
+  ctgDebug("## cluster 0x%"PRIx64" %p cache Info END ##", pCtg->clusterId, pCtg);
 }
 
 int32_t ctgdShowCacheInfo(void) {
@@ -407,6 +407,8 @@ int32_t ctgdShowCacheInfo(void) {
   }
 
   CTG_API_ENTER();
+
+  qDebug("# total catalog cluster number %d #", taosHashGetSize(gCtgMgmt.pCluster));
 
   SCatalog *pCtg = NULL;
   void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c
index 178025704f..fa1a262832 100644
--- a/source/libs/catalog/src/ctgRemote.c
+++ b/source/libs/catalog/src/ctgRemote.c
@@ -186,13 +186,13 @@ int32_t ctgHandleMsgCallback(void *param, const SDataBuf *pMsg, int32_t rspCode)
 
   SCtgJob* pJob = taosAcquireRef(gCtgMgmt.jobPool, cbParam->refId);
   if (NULL == pJob) {
-    qDebug("job refId %" PRIx64 " already dropped", cbParam->refId);
+    qDebug("ctg job refId 0x%" PRIx64 " already dropped", cbParam->refId);
     goto _return;
   }
 
   SCtgTask *pTask = taosArrayGet(pJob->pTasks, cbParam->taskId);
 
-  qDebug("QID:0x%" PRIx64 " task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(cbParam->reqType + 1));
+  qDebug("QID:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(cbParam->reqType + 1));
 
   CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].handleRspFp)(pTask, cbParam->reqType, pMsg, rspCode));
 
@@ -263,7 +263,7 @@ int32_t ctgAsyncSendMsg(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTask* pTask
     CTG_ERR_JRET(code);
   }
 
-  ctgDebug("req msg sent, reqId:0x%" PRIx64 ", msg type:%d, %s", pTask->pJob->queryId, msgType, TMSG_INFO(msgType));
+  ctgDebug("ctg req msg sent, reqId:0x%" PRIx64 ", msg type:%d, %s", pTask->pJob->queryId, msgType, TMSG_INFO(msgType));
   return TSDB_CODE_SUCCESS;
 
 _return:
diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c
index e97c34dc26..476eb371b0 100644
--- a/source/libs/catalog/src/ctgUtil.c
+++ b/source/libs/catalog/src/ctgUtil.c
@@ -434,7 +434,7 @@ void ctgFreeJob(void* job) {
 
   taosMemoryFree(job);
 
-  qDebug("QID:%" PRIx64 ", job %" PRIx64 " freed", qid, rid);
+  qDebug("QID:0x%" PRIx64 ", ctg job 0x%" PRIx64 " freed", qid, rid);
 }
 
 int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target) {
diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c
index f330b7ce16..f06664b60b 100644
--- a/source/libs/command/src/command.c
+++ b/source/libs/command/src/command.c
@@ -14,6 +14,7 @@
  */
 
 #include "command.h"
+#include "catalog.h"
 #include "tdatablock.h"
 
 static int32_t getSchemaBytes(const SSchema* pSchema) {
@@ -120,8 +121,7 @@ static int32_t execDescribe(SNode* pStmt, SRetrieveTableRsp** pRsp) {
 }
 
 static int32_t execResetQueryCache() {
-  // todo
-  return TSDB_CODE_SUCCESS;
+  return catalogClearCache();
 }
 
 int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp) {
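// End-to-end effect of the command.c change, as a sketch: RESET QUERY CACHE
// is no longer a silent no-op, because execResetQueryCache() now forwards the
// catalog's real return code. A client-side smoke test using the public C
// client API (the connection pConn is assumed to be set up elsewhere):
TAOS_RES *res  = taos_query(pConn, "reset query cache");
int32_t   code = taos_errno(res);  // reflects catalogClearCache() now
taos_free_result(res);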
diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c
index 79861dfa05..8f91282480 100644
--- a/source/libs/command/src/explain.c
+++ b/source/libs/command/src/explain.c
@@ -194,6 +194,11 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo
       pPhysiChildren = fillPhysiNode->node.pChildren;
       break;
     }
+    case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN: {
+      STableMergeScanPhysiNode *mergePhysiNode = (STableMergeScanPhysiNode *)pNode;
+      pPhysiChildren = mergePhysiNode->scan.node.pChildren;
+      break;
+    }
     default:
       qError("not supported physical node type %d", pNode->type);
       QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
@@ -398,6 +403,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
       break;
     }
     case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
+    case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
    case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: {
       STableScanPhysiNode *pTblScanNode = (STableScanPhysiNode *)pNode;
       EXPLAIN_ROW_NEW(level, EXPLAIN_TBL_SCAN_FORMAT, pTblScanNode->scan.tableName.tname);
diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h
index a70cff5552..8fd5e7f41e 100644
--- a/source/libs/executor/inc/executil.h
+++ b/source/libs/executor/inc/executil.h
@@ -15,7 +15,9 @@
 #ifndef TDENGINE_QUERYUTIL_H
 #define TDENGINE_QUERYUTIL_H
 
-#include
+#include "function.h"
+#include "nodes.h"
+#include "plannodes.h"
 #include "tbuffer.h"
 #include "tcommon.h"
 #include "tpagedbuf.h"
@@ -77,7 +79,7 @@ typedef struct SResultRowInfo {
 struct SqlFunctionCtx;
 
 size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
-int32_t initResultRowInfo(SResultRowInfo* pResultRowInfo, int32_t size);
+void initResultRowInfo(SResultRowInfo* pResultRowInfo);
 void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo);
 
 void closeAllResultRows(SResultRowInfo* pResultRowInfo);
@@ -86,7 +88,7 @@ void initResultRow(SResultRow *pResultRow);
 void closeResultRow(SResultRow* pResultRow);
 bool isResultRowClosed(SResultRow* pResultRow);
 
-struct SResultRowEntryInfo* getResultCell(const SResultRow* pRow, int32_t index, const int32_t* offset);
+struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset);
 
 static FORCE_INLINE SResultRow *getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos) {
   SFilePage* bufPage = (SFilePage*) getBufPage(pBuf, pos->pageId);
@@ -98,9 +100,27 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap,
 void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList);
 void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo);
-bool hashRemainDataInGroupInfo(SGroupResInfo* pGroupResInfo);
+bool hasDataInGroupInfo(SGroupResInfo* pGroupResInfo);
 
-bool incNextGroup(SGroupResInfo* pGroupResInfo);
 int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo);
 
+SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode);
+
+int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo, SNode* pTagCond);
+SArray* createSortInfo(SNodeList* pNodeList);
+SArray* extractPartitionColInfo(SNodeList* pNodeList);
+SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols, int32_t type);
+
+SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs);
+
+SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowCellInfoOffset);
+void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols);
+void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow);
+
+SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode);
+SColumn extractColumnFromColumnNode(SColumnNode* pColNode);
+
+int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode);
+void cleanupQueryTableDataCond(SQueryTableDataCond* pCond);
+
 #endif  // TDENGINE_QUERYUTIL_H
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index 034e2893df..5efb448a38 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -747,43 +747,27 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo,
 void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWindow* pWin, SColumnInfoData* pTimeWindowData, int32_t offset,
                       int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput, int32_t order);
 
-int32_t setGroupResultOutputBuf(SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t type, int16_t bytes,
-                                int32_t groupId, SDiskbasedBuf* pBuf, SExecTaskInfo* pTaskInfo, SAggSupporter* pAggSup);
+
 void doDestroyBasicInfo(SOptrBasicInfo* pInfo, int32_t numOfOutput);
-int32_t setDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
+int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
                                  int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total, SArray* pColList);
 void getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key, STimeWindow* win);
 int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
 int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);
 
-SArray* extractPartitionColInfo(SNodeList* pNodeList);
-
 void doSetOperatorCompleted(SOperatorInfo* pOperator);
 void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock);
-SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowCellInfoOffset);
-void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols);
-void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow);
+
 void cleanupAggSup(SAggSupporter* pAggSup);
 void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
 void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
 void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId);
 
-SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode);
-SColumn extractColumnFromColumnNode(SColumnNode* pColNode);
-SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity, SArray* pColMatchInfo, SSortOperatorInfo* pInfo);
 SSDataBlock* loadNextDataBlock(void* param);
 
 void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset);
 
-SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols,
-                            SExecTaskInfo* pTaskInfo, int32_t type);
-
-SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs);
-SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode);
-int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode);
-void clearupQueryTableDataCond(SQueryTableDataCond* pCond);
-
 SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData, int16_t bytes,
                                    bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo, bool isIntervalQuery, SAggSupporter* pSup);
@@ -799,9 +783,9 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
                                            int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode *pNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t num, SSDataBlock* pResBlock, SLimit* pLimit, SLimit* pSlimit, SNode* pCondition, SExecTaskInfo* pTaskInfo);
-SOperatorInfo *createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo, SExprInfo* pExprInfo, int32_t numOfCols,
-                                      SArray* pIndexMap, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortPhyNode, SExecTaskInfo* pTaskInfo);
+
 SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams, int32_t numStreams, SSDataBlock* pInputBlock,
                                                    SSDataBlock* pResBlock, SArray* pSortInfo, SArray* pColMatchColInfo,
                                                    SExecTaskInfo* pTaskInfo);
@@ -831,10 +815,9 @@ SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SExecTaskInfo*
 SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHandle,
                                             STableScanPhysiNode* pTableScanNode, SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup);
 
+SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, bool multigroupResult,
+                                      SExecTaskInfo* pTaskInfo);
 
-SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
-                                      SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock, int32_t fillType, SNodeListNode* fillVal,
-                                      bool multigroupResult, SExecTaskInfo* pTaskInfo);
 SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
                                              SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId,
                                              SColumn* pStateKeyCol, SExecTaskInfo* pTaskInfo);
@@ -843,10 +826,13 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
 SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
                                            SSDataBlock* pResultBlock, const SNodeListNode* pValNode, SExecTaskInfo* pTaskInfo);
 
-SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SJoinPhysiNode* pJoinNode,
+                                           SExecTaskInfo* pTaskInfo);
 
-SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
-                                                  SSDataBlock* pResBlock, int64_t gap, int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream,
+                                                  SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream,
+                                                       SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
 SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
 
@@ -862,8 +848,8 @@ void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlo
 
 bool isTaskKilled(SExecTaskInfo* pTaskInfo);
 int32_t checkForQueryBuf(size_t numOfTables);
-void setTaskKilled(SExecTaskInfo* pTaskInfo);
-void queryCostStatis(SExecTaskInfo* pTaskInfo);
+void    setTaskKilled(SExecTaskInfo* pTaskInfo);
+void    queryCostStatis(SExecTaskInfo* pTaskInfo);
 
 void doDestroyTask(SExecTaskInfo* pTaskInfo);
 int32_t getMaximumIdleDurationSec();
@@ -882,7 +868,7 @@ int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t *length);
  * length: the length of data
  * return: result code, 0 means success
  */
-int32_t decodeOperator(SOperatorInfo* ops, char* data, int32_t length);
+int32_t decodeOperator(SOperatorInfo* ops, const char* data, int32_t length);
 
 void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
 int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
@@ -912,8 +898,6 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
                                              SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs, const int32_t* rowCellOffset,
                                              SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
 
-int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo,
-                     SNode* pTagCond);
 int32_t createMultipleDataReaders(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
                                   STableListInfo* pTableListInfo, SArray* arrayReader, uint64_t queryId,
                                   uint64_t taskId, SNode* pTagCond);
diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h
index 86ee841cc2..363f379ee4 100644
--- a/source/libs/executor/inc/tsort.h
+++ b/source/libs/executor/inc/tsort.h
@@ -63,7 +63,7 @@ typedef int32_t (*_sort_merge_compar_fn_t)(const void* p1, const void* p2, void*
  * @param type
  * @return
  */
-SSortHandle* tsortCreateSortHandle(SArray* pOrderInfo, SArray* pIndexMap, int32_t type, int32_t pageSize, int32_t numOfPages, SSDataBlock* pBlock, const char* idstr);
+SSortHandle* tsortCreateSortHandle(SArray* pOrderInfo, int32_t type, int32_t pageSize, int32_t numOfPages, SSDataBlock* pBlock, const char* idstr);
 
 /**
  *
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 01ed30c189..c8d9252013 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -14,24 +14,20 @@
  */
 
 #include "os.h"
-#include "tmsg.h"
+#include "index.h"
+#include "function.h"
+#include "functionMgt.h"
+#include "tdatablock.h"
 #include "thash.h"
+#include "tmsg.h"
 
 #include "executil.h"
 #include "executorimpl.h"
 #include "tcompression.h"
-#include "tlosertree.h"
 
-typedef struct SCompSupporter {
-  STableQueryInfo **pTableQueryInfo;
-  int32_t          *rowIndex;
-  int32_t           order;
-} SCompSupporter;
-
-int32_t initResultRowInfo(SResultRowInfo *pResultRowInfo, int32_t size) {
+void initResultRowInfo(SResultRowInfo *pResultRowInfo) {
   pResultRowInfo->size = 0;
   pResultRowInfo->cur.pageId = -1;
-
-  return TSDB_CODE_SUCCESS;
 }
 
 void cleanupResultRowInfo(SResultRowInfo *pResultRowInfo) {
@@ -74,7 +70,7 @@ void closeResultRow(SResultRow* pResultRow) {
 }
 
 // TODO refactor: use macro
-SResultRowEntryInfo* getResultCell(const SResultRow* pRow, int32_t index, const int32_t* offset) {
+SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset) {
   assert(index >= 0 && offset != NULL);
   return (SResultRowEntryInfo*)((char*) pRow->pEntryInfo + offset[index]);
 }
@@ -160,7 +156,7 @@ void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayL
   ASSERT(pGroupResInfo->index <= getNumOfTotalRes(pGroupResInfo));
 }
 
-bool hashRemainDataInGroupInfo(SGroupResInfo* pGroupResInfo) {
+bool hasDataInGroupInfo(SGroupResInfo* pGroupResInfo) {
   if (pGroupResInfo->pRows == NULL) {
     return false;
   }
@@ -177,86 +173,532 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) {
   return (int32_t) taosArrayGetSize(pGroupResInfo->pRows);
 }
 
-static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *param) {
-  int32_t left = *(int32_t *)pLeft;
-  int32_t right = *(int32_t *)pRight;
-
-  SCompSupporter * supporter = (SCompSupporter *)param;
-
-  int32_t leftPos = supporter->rowIndex[left];
-  int32_t rightPos = supporter->rowIndex[right];
-
-  /* left source is exhausted */
-  if (leftPos == -1) {
-    return 1;
+SArray* createSortInfo(SNodeList* pNodeList) {
+  size_t  numOfCols = LIST_LENGTH(pNodeList);
+  SArray* pList = taosArrayInit(numOfCols, sizeof(SBlockOrderInfo));
+  if (pList == NULL) {
+    terrno = TSDB_CODE_OUT_OF_MEMORY;
+    return pList;
   }
 
-  /* right source is exhausted*/
-  if (rightPos == -1) {
-    return -1;
+  for (int32_t i = 0; i < numOfCols; ++i) {
+    SOrderByExprNode* pSortKey = (SOrderByExprNode*)nodesListGetNode(pNodeList, i);
+    SBlockOrderInfo   bi = {0};
+    bi.order = (pSortKey->order == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
+    bi.nullFirst = (pSortKey->nullOrder == NULL_ORDER_FIRST);
+
+    SColumnNode* pColNode = (SColumnNode*)pSortKey->pExpr;
+    bi.slotId = pColNode->slotId;
+    taosArrayPush(pList, &bi);
   }
 
-  ASSERT(0);
-  STableQueryInfo** pList = supporter->pTableQueryInfo;
-//  SResultRow* pWindowRes1 = pList[left]->resInfo.pResult[leftPos];
-//  SResultRow * pWindowRes1 = getResultRow(&(pList[left]->resInfo), leftPos);
-//  TSKEY leftTimestamp = pWindowRes1->win.skey;
-
-//  SResultRowInfo *pWindowResInfo2 = &(pList[right]->resInfo);
-//  SResultRow * pWindowRes2 = getResultRow(pWindowResInfo2, rightPos);
-//  SResultRow* pWindowRes2 = pList[right]->resInfo.pResult[rightPos];
-//  TSKEY rightTimestamp = pWindowRes2->win.skey;
-
-//  if (leftTimestamp == rightTimestamp) {
-    return 0;
-//  }
-
-//  if (supporter->order == TSDB_ORDER_ASC) {
-//    return (leftTimestamp > rightTimestamp)? 1:-1;
-//  } else {
-//    return (leftTimestamp < rightTimestamp)? 1:-1;
-//  }
+  return pList;
 }
 
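// Usage sketch for the new createSortInfo() together with the slimmed-down
// tsortCreateSortHandle() above (the pIndexMap parameter is gone). The
// pSortKeys field name and the local variables are assumptions standing in
// for what a sort operator would own; SORT_SINGLESOURCE_SORT is one of the
// existing sort-handle types:
SArray*      pSortInfo = createSortInfo(pSortPhyNode->pSortKeys);
SSortHandle* pHandle   = tsortCreateSortHandle(pSortInfo, SORT_SINGLESOURCE_SORT,
                                               pageSize, numOfPages, pInputBlock, idStr);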
-int32_t tsAscOrder(const void* p1, const void* p2) {
-  SResultRowCell* pc1 = (SResultRowCell*) p1;
-  SResultRowCell* pc2 = (SResultRowCell*) p2;
+SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) {
+  int32_t numOfCols = LIST_LENGTH(pNode->pSlots);
 
-  if (pc1->groupId == pc2->groupId) {
-    ASSERT(0);
-//    if (pc1->pRow->win.skey == pc2->pRow->win.skey) {
-//      return 0;
-//    } else {
-//      return (pc1->pRow->win.skey < pc2->pRow->win.skey)? -1:1;
-//    }
+  SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
+  pBlock->pDataBlock = taosArrayInit(numOfCols, sizeof(SColumnInfoData));
+
+  pBlock->info.blockId = pNode->dataBlockId;
+  pBlock->info.rowSize = pNode->totalRowSize;  // todo ??
+  pBlock->info.type = STREAM_INVALID;
+
+  for (int32_t i = 0; i < numOfCols; ++i) {
+    SColumnInfoData idata = {{0}};
+    SSlotDescNode*  pDescNode = (SSlotDescNode*)nodesListGetNode(pNode->pSlots, i);
+    //    if (!pDescNode->output) {  // todo disable it temporarily
+    //      continue;
+    //    }
+
+    idata.info.type = pDescNode->dataType.type;
+    idata.info.bytes = pDescNode->dataType.bytes;
+    idata.info.scale = pDescNode->dataType.scale;
+    idata.info.slotId = pDescNode->slotId;
+    idata.info.precision = pDescNode->dataType.precision;
+
+    if (IS_VAR_DATA_TYPE(idata.info.type)) {
+      pBlock->info.hasVarCol = true;
+    }
+
+    taosArrayPush(pBlock->pDataBlock, &idata);
+  }
+
+  pBlock->info.numOfCols = taosArrayGetSize(pBlock->pDataBlock);
+  return pBlock;
+}
+
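// One-liner usage sketch: every operator derives its output block from its
// plan node's output descriptor. pOutputDataBlockDesc is the field name used
// for that descriptor elsewhere in the executor (an assumption here, since it
// is not shown in this hunk):
SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);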
+int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo, SNode* pTagCond) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
+
+  uint64_t tableUid = pScanNode->uid;
+
+  if (pScanNode->tableType == TSDB_SUPER_TABLE) {
+    if (pTagCond) {
+      SIndexMetaArg metaArg = {
+          .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid};
+
+      SArray* res = taosArrayInit(8, sizeof(uint64_t));
+      code = doFilterTag(pTagCond, &metaArg, res);
+      if (code == TSDB_CODE_INDEX_REBUILDING) {  // todo
+        // doFilter();
+      } else if (code != TSDB_CODE_SUCCESS) {
+        qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid);
+        taosArrayDestroy(res);
+        terrno = code;
+        return code;
+      } else {
+        qDebug("success to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid);
+      }
+
+      for (int i = 0; i < taosArrayGetSize(res); i++) {
+        STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)};
+        taosArrayPush(pListInfo->pTableList, &info);
+      }
+      taosArrayDestroy(res);
+    } else {
+      code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
+    }
+  } else {  // Create one table group.
+    STableKeyInfo info = {.lastKey = 0, .uid = tableUid};
+    taosArrayPush(pListInfo->pTableList, &info);
+  }
+
+  return code;
+}
+
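// Caller-side sketch for the reworked getTableList(): it now receives the
// whole SScanPhysiNode (instead of a tableType/uid pair) plus the tag-filter
// expression, so a scan operator can resolve its target tables in one call.
// metaHandle and pTagCond are assumed to come from the read handle and plan:
STableListInfo tableList = {0};
int32_t        code = getTableList(metaHandle, &pTableScanNode->scan, &tableList, pTagCond);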
pExp = &pExprs[i]; + + pExp->pExpr = taosMemoryCalloc(1, sizeof(tExprNode)); + pExp->pExpr->_function.num = 1; + pExp->pExpr->_function.functionId = -1; + + int32_t type = nodeType(pTargetNode->pExpr); + // it is a project query, or group by column + if (type == QUERY_NODE_COLUMN) { + pExp->pExpr->nodeType = QUERY_NODE_COLUMN; + SColumnNode* pColNode = (SColumnNode*)pTargetNode->pExpr; + + pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam)); + pExp->base.numOfParams = 1; + + SDataType* pType = &pColNode->node.resType; + pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale, + pType->precision, pColNode->colName); + pExp->base.pParam[0].pCol = createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType); + pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN; + } else if (type == QUERY_NODE_VALUE) { + pExp->pExpr->nodeType = QUERY_NODE_VALUE; + SValueNode* pValNode = (SValueNode*)pTargetNode->pExpr; + + pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam)); + pExp->base.numOfParams = 1; + + SDataType* pType = &pValNode->node.resType; + pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale, + pType->precision, pValNode->node.aliasName); + pExp->base.pParam[0].type = FUNC_PARAM_TYPE_VALUE; + nodesValueNodeToVariant(pValNode, &pExp->base.pParam[0].param); + } else if (type == QUERY_NODE_FUNCTION) { + pExp->pExpr->nodeType = QUERY_NODE_FUNCTION; + SFunctionNode* pFuncNode = (SFunctionNode*)pTargetNode->pExpr; + + SDataType* pType = &pFuncNode->node.resType; + pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale, + pType->precision, pFuncNode->node.aliasName); + + pExp->pExpr->_function.functionId = pFuncNode->funcId; + pExp->pExpr->_function.pFunctNode = pFuncNode; + + strncpy(pExp->pExpr->_function.functionName, pFuncNode->functionName, + tListLen(pExp->pExpr->_function.functionName)); +#if 1 + // todo refactor: add the parameter for tbname function + if (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0) { + pFuncNode->pParameterList = nodesMakeList(); + ASSERT(LIST_LENGTH(pFuncNode->pParameterList) == 0); + SValueNode* res = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + if (NULL == res) { // todo handle error + } else { + res->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT}; + nodesListAppend(pFuncNode->pParameterList, (SNode*)res); + } + } +#endif + + int32_t numOfParam = LIST_LENGTH(pFuncNode->pParameterList); + + pExp->base.pParam = taosMemoryCalloc(numOfParam, sizeof(SFunctParam)); + pExp->base.numOfParams = numOfParam; + + for (int32_t j = 0; j < numOfParam; ++j) { + SNode* p1 = nodesListGetNode(pFuncNode->pParameterList, j); + if (p1->type == QUERY_NODE_COLUMN) { + SColumnNode* pcn = (SColumnNode*)p1; + + pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN; + pExp->base.pParam[j].pCol = createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType); + } else if (p1->type == QUERY_NODE_VALUE) { + SValueNode* pvn = (SValueNode*)p1; + pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE; + nodesValueNodeToVariant(pvn, &pExp->base.pParam[j].param); + } + } + } else if (type == QUERY_NODE_OPERATOR) { + pExp->pExpr->nodeType = QUERY_NODE_OPERATOR; + SOperatorNode* pNode = (SOperatorNode*)pTargetNode->pExpr; + + pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam)); + pExp->base.numOfParams = 1; + + SDataType* pType = &pNode->node.resType; + pExp->base.resSchema = 
createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale, + pType->precision, pNode->node.aliasName); + pExp->pExpr->_optrRoot.pRootNode = pTargetNode->pExpr; + } else { + ASSERT(0); + } + } + + return pExprs; +} + +// set the output buffer for the selectivity + tag query +static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutput) { + int32_t num = 0; + + SqlFunctionCtx* p = NULL; + SqlFunctionCtx** pValCtx = taosMemoryCalloc(numOfOutput, POINTER_BYTES); + if (pValCtx == NULL) { + return TSDB_CODE_QRY_OUT_OF_MEMORY; + } + + for (int32_t i = 0; i < numOfOutput; ++i) { + if (strcmp(pCtx[i].pExpr->pExpr->_function.functionName, "_select_value") == 0) { + pValCtx[num++] = &pCtx[i]; + } else if (fmIsSelectFunc(pCtx[i].functionId)) { + p = &pCtx[i]; + } + } + + if (p != NULL) { + p->subsidiaries.pCtx = pValCtx; + p->subsidiaries.num = num; } else { - return (pc1->groupId < pc2->groupId)? -1:1; + taosMemoryFreeClear(pValCtx); + } + + return TSDB_CODE_SUCCESS; +} + +SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowCellInfoOffset) { + SqlFunctionCtx* pFuncCtx = (SqlFunctionCtx*)taosMemoryCalloc(numOfOutput, sizeof(SqlFunctionCtx)); + if (pFuncCtx == NULL) { + return NULL; + } + + *rowCellInfoOffset = taosMemoryCalloc(numOfOutput, sizeof(int32_t)); + if (*rowCellInfoOffset == 0) { + taosMemoryFreeClear(pFuncCtx); + return NULL; + } + + for (int32_t i = 0; i < numOfOutput; ++i) { + SExprInfo* pExpr = &pExprInfo[i]; + + SExprBasicInfo* pFunct = &pExpr->base; + SqlFunctionCtx* pCtx = &pFuncCtx[i]; + + pCtx->functionId = -1; + pCtx->curBufPage = -1; + pCtx->pExpr = pExpr; + + if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) { + SFuncExecEnv env = {0}; + pCtx->functionId = pExpr->pExpr->_function.pFunctNode->funcId; + + if (fmIsAggFunc(pCtx->functionId) || fmIsIndefiniteRowsFunc(pCtx->functionId)) { + bool isUdaf = fmIsUserDefinedFunc(pCtx->functionId); + if (!isUdaf) { + fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet); + } else { + char* udfName = pExpr->pExpr->_function.pFunctNode->functionName; + strncpy(pCtx->udfName, udfName, strlen(udfName)); + fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet); + } + pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env); + } else { + fmGetScalarFuncExecFuncs(pCtx->functionId, &pCtx->sfp); + if (pCtx->sfp.getEnv != NULL) { + pCtx->sfp.getEnv(pExpr->pExpr->_function.pFunctNode, &env); + } + } + pCtx->resDataInfo.interBufSize = env.calcMemSize; + } else if (pExpr->pExpr->nodeType == QUERY_NODE_COLUMN || pExpr->pExpr->nodeType == QUERY_NODE_OPERATOR || + pExpr->pExpr->nodeType == QUERY_NODE_VALUE) { + // for simple column, the result buffer needs to hold at least one element. 
+ pCtx->resDataInfo.interBufSize = pFunct->resSchema.bytes; + } + + pCtx->input.numOfInputCols = pFunct->numOfParams; + pCtx->input.pData = taosMemoryCalloc(pFunct->numOfParams, POINTER_BYTES); + pCtx->input.pColumnDataAgg = taosMemoryCalloc(pFunct->numOfParams, POINTER_BYTES); + + pCtx->pTsOutput = NULL; + pCtx->resDataInfo.bytes = pFunct->resSchema.bytes; + pCtx->resDataInfo.type = pFunct->resSchema.type; + pCtx->order = TSDB_ORDER_ASC; + pCtx->start.key = INT64_MIN; + pCtx->end.key = INT64_MIN; + pCtx->numOfParams = pExpr->base.numOfParams; + pCtx->increase = false; + + pCtx->param = pFunct->pParam; + } + + for (int32_t i = 1; i < numOfOutput; ++i) { + (*rowCellInfoOffset)[i] = + (int32_t)((*rowCellInfoOffset)[i - 1] + sizeof(SResultRowEntryInfo) + pFuncCtx[i - 1].resDataInfo.interBufSize); + } + + setSelectValueColumnInfo(pFuncCtx, numOfOutput); + return pFuncCtx; +} + +// NOTE: sources columns are more than the destination SSDatablock columns. +void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols) { + size_t numOfSrcCols = taosArrayGetSize(pCols); + + int32_t i = 0, j = 0; + while (i < numOfSrcCols && j < taosArrayGetSize(pColMatchInfo)) { + SColumnInfoData* p = taosArrayGet(pCols, i); + SColMatchInfo* pmInfo = taosArrayGet(pColMatchInfo, j); + if (!pmInfo->output) { + j++; + continue; + } + + if (p->info.colId == pmInfo->colId) { + SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, pmInfo->targetSlotId); + colDataAssign(pDst, p, pBlock->info.rows); + i++; + j++; + } else if (p->info.colId < pmInfo->colId) { + i++; + } else { + ASSERT(0); + } } } -int32_t tsDescOrder(const void* p1, const void* p2) { - SResultRowCell* pc1 = (SResultRowCell*) p1; - SResultRowCell* pc2 = (SResultRowCell*) p2; +SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) { + SInterval interval = { + .interval = pTableScanNode->interval, + .sliding = pTableScanNode->sliding, + .intervalUnit = pTableScanNode->intervalUnit, + .slidingUnit = pTableScanNode->slidingUnit, + .offset = pTableScanNode->offset, + }; - if (pc1->groupId == pc2->groupId) { - ASSERT(0); -// if (pc1->pRow->win.skey == pc2->pRow->win.skey) { -// return 0; -// } else { -// return (pc1->pRow->win.skey < pc2->pRow->win.skey)? 1:-1; -// } - } else { - return (pc1->groupId < pc2->groupId)? -1:1; + return interval; +} + +SColumn extractColumnFromColumnNode(SColumnNode* pColNode) { + SColumn c = {0}; + c.slotId = pColNode->slotId; + c.colId = pColNode->colId; + c.type = pColNode->node.resType.type; + c.bytes = pColNode->node.resType.bytes; + c.scale = pColNode->node.resType.scale; + c.precision = pColNode->node.resType.precision; + return c; +} + +int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) { + pCond->loadExternalRows = false; + + pCond->order = pTableScanNode->scanSeq[0] > 0 ? 
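// What the rowCellInfoOffset array computed above encodes, as a sketch: each
// function's SResultRowEntryInfo plus its intermediate buffer is packed
// back-to-back inside one result row, and getResultEntryInfo() (renamed from
// getResultCell in this patch) resolves entry i by that offset:
SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, i, rowCellInfoOffset);
// equivalent to: (SResultRowEntryInfo*)((char*)pRow->pEntryInfo + rowCellInfoOffset[i])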
+// NOTE: source columns may outnumber the destination SSDataBlock columns.
+void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols) {
+  size_t numOfSrcCols = taosArrayGetSize(pCols);
+
+  int32_t i = 0, j = 0;
+  while (i < numOfSrcCols && j < taosArrayGetSize(pColMatchInfo)) {
+    SColumnInfoData* p = taosArrayGet(pCols, i);
+    SColMatchInfo*   pmInfo = taosArrayGet(pColMatchInfo, j);
+    if (!pmInfo->output) {
+      j++;
+      continue;
+    }
+
+    if (p->info.colId == pmInfo->colId) {
+      SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, pmInfo->targetSlotId);
+      colDataAssign(pDst, p, pBlock->info.rows);
+      i++;
+      j++;
+    } else if (p->info.colId < pmInfo->colId) {
+      i++;
+    } else {
+      ASSERT(0);
+    }
+  }
+}
 
-int32_t tsDescOrder(const void* p1, const void* p2) {
-  SResultRowCell* pc1 = (SResultRowCell*) p1;
-  SResultRowCell* pc2 = (SResultRowCell*) p2;
+SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
+  SInterval interval = {
+      .interval = pTableScanNode->interval,
+      .sliding = pTableScanNode->sliding,
+      .intervalUnit = pTableScanNode->intervalUnit,
+      .slidingUnit = pTableScanNode->slidingUnit,
+      .offset = pTableScanNode->offset,
+  };
 
-  if (pc1->groupId == pc2->groupId) {
-    ASSERT(0);
-//    if (pc1->pRow->win.skey == pc2->pRow->win.skey) {
-//      return 0;
-//    } else {
-//      return (pc1->pRow->win.skey < pc2->pRow->win.skey)? 1:-1;
-//    }
-  } else {
-    return (pc1->groupId < pc2->groupId)? -1:1;
+  return interval;
+}
+
+SColumn extractColumnFromColumnNode(SColumnNode* pColNode) {
+  SColumn c = {0};
+  c.slotId = pColNode->slotId;
+  c.colId = pColNode->colId;
+  c.type = pColNode->node.resType.type;
+  c.bytes = pColNode->node.resType.bytes;
+  c.scale = pColNode->node.resType.scale;
+  c.precision = pColNode->node.resType.precision;
+  return c;
+}
+
+int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) {
+  pCond->loadExternalRows = false;
+
+  pCond->order = pTableScanNode->scanSeq[0] > 0 ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
+  pCond->numOfCols = LIST_LENGTH(pTableScanNode->scan.pScanCols);
+  pCond->colList = taosMemoryCalloc(pCond->numOfCols, sizeof(SColumnInfo));
+  if (pCond->colList == NULL) {
+    terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
+    return terrno;
+  }
+
+  // pCond->twindow = pTableScanNode->scanRange;
+  // TODO: get it from stable scan node
+  pCond->numOfTWindows = 1;
+  pCond->twindows = taosMemoryCalloc(pCond->numOfTWindows, sizeof(STimeWindow));
+  pCond->twindows[0] = pTableScanNode->scanRange;
+  pCond->suid = pTableScanNode->scan.suid;
+
+#if 1
+  // todo work around a problem, remove it later
+  for (int32_t i = 0; i < pCond->numOfTWindows; ++i) {
+    if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) ||
+        (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) {
+      TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
+    }
+  }
+#endif
+
+  for (int32_t i = 0; i < pCond->numOfTWindows; ++i) {
+    if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) ||
+        (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) {
+      TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
+    }
+  }
+  taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow);
+
+  pCond->type = BLOCK_LOAD_OFFSET_SEQ_ORDER;
+  // pCond->type = pTableScanNode->scanFlag;
+
+  int32_t j = 0;
+  for (int32_t i = 0; i < pCond->numOfCols; ++i) {
+    STargetNode* pNode = (STargetNode*)nodesListGetNode(pTableScanNode->scan.pScanCols, i);
+    SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
+    if (pColNode->colType == COLUMN_TYPE_TAG) {
+      continue;
+    }
+
+    pCond->colList[j].type = pColNode->node.resType.type;
+    pCond->colList[j].bytes = pColNode->node.resType.bytes;
+    pCond->colList[j].colId = pColNode->colId;
+    j += 1;
+  }
+
+  pCond->numOfCols = j;
+  return TSDB_CODE_SUCCESS;
 }
 
-void orderTheResultRows(STaskRuntimeEnv* pRuntimeEnv) {
-  __compar_fn_t fn = NULL;
-//  if (pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_ASC) {
-//    fn = tsAscOrder;
-//  } else {
-//    fn = tsDescOrder;
-//  }
-
-  taosArraySort(pRuntimeEnv->pResultRowArrayList, fn);
-}
+void cleanupQueryTableDataCond(SQueryTableDataCond* pCond) {
+  taosMemoryFree(pCond->twindows);
+  taosMemoryFree(pCond->colList);
+}
\ No newline at end of file
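// initQueryTableDataCond()/cleanupQueryTableDataCond() are a strict pair: the
// cond owns two heap allocations (twindows and colList). A sketch of the
// intended lifecycle around a tsdb reader (reader creation elided):
SQueryTableDataCond cond = {0};
if (TSDB_CODE_SUCCESS == initQueryTableDataCond(&cond, pTableScanNode)) {
  // ... open the tsdb reader with &cond and run the scan ...
  cleanupQueryTableDataCond(&cond);  // frees cond.twindows and cond.colList
}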
diff --git a/source/libs/executor/src/executorMain.c b/source/libs/executor/src/executorMain.c
index 00158d7024..ff281dacbd 100644
--- a/source/libs/executor/src/executorMain.c
+++ b/source/libs/executor/src/executorMain.c
@@ -219,4 +219,23 @@ int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t *resNum, SExplainExecInfo
   return getOperatorExplainExecInfo(pTaskInfo->pRoot, pRes, &capacity, resNum);
 }
 
+int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len) {
+  SExecTaskInfo* pTaskInfo = (struct SExecTaskInfo*)tinfo;
+  if (pTaskInfo->pRoot == NULL) {
+    return TSDB_CODE_INVALID_PARA;
+  }
+
+  return encodeOperator(pTaskInfo->pRoot, pOutput, len);
+}
+
+int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len) {
+  SExecTaskInfo* pTaskInfo = (struct SExecTaskInfo*) tinfo;
+
+  if (pTaskInfo == NULL || pInput == NULL || len == 0) {
+    return TSDB_CODE_INVALID_PARA;
+  }
+
+  return decodeOperator(pTaskInfo->pRoot, pInput, len);
+}
+
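// Round-trip sketch for the new task-status API: encode the operator tree of
// a running task, then restore state from the same buffer. Error handling is
// trimmed, and buffer ownership (the caller freeing pOutput) is an assumption
// since this hunk does not document it:
char*   buf = NULL;
int32_t len = 0;
if (TSDB_CODE_SUCCESS == qSerializeTaskStatus(tinfo, &buf, &len)) {
  int32_t code = qDeserializeTaskStatus(tinfo, buf, len);
  taosMemoryFree(buf);
}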
-    taosMemoryFreeClear(pValCtx);
-  }
-
-  return TSDB_CODE_SUCCESS;
-}
-
-SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowCellInfoOffset) {
-  SqlFunctionCtx* pFuncCtx = (SqlFunctionCtx*)taosMemoryCalloc(numOfOutput, sizeof(SqlFunctionCtx));
-  if (pFuncCtx == NULL) {
-    return NULL;
-  }
-
-  *rowCellInfoOffset = taosMemoryCalloc(numOfOutput, sizeof(int32_t));
-  if (*rowCellInfoOffset == 0) {
-    taosMemoryFreeClear(pFuncCtx);
-    return NULL;
-  }
-
-  for (int32_t i = 0; i < numOfOutput; ++i) {
-    SExprInfo* pExpr = &pExprInfo[i];
-
-    SExprBasicInfo* pFunct = &pExpr->base;
-    SqlFunctionCtx* pCtx = &pFuncCtx[i];
-
-    pCtx->functionId = -1;
-    pCtx->curBufPage = -1;
-    pCtx->pExpr = pExpr;
-
-    if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
-      SFuncExecEnv env = {0};
-      pCtx->functionId = pExpr->pExpr->_function.pFunctNode->funcId;
-
-      if (fmIsAggFunc(pCtx->functionId) || fmIsIndefiniteRowsFunc(pCtx->functionId)) {
-        bool isUdaf = fmIsUserDefinedFunc(pCtx->functionId);
-        if (!isUdaf) {
-          fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet);
-        } else {
-          char* udfName = pExpr->pExpr->_function.pFunctNode->functionName;
-          strncpy(pCtx->udfName, udfName, strlen(udfName));
-          fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet);
-        }
-        pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
-      } else {
-        fmGetScalarFuncExecFuncs(pCtx->functionId, &pCtx->sfp);
-        if (pCtx->sfp.getEnv != NULL) {
-          pCtx->sfp.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
-        }
-      }
-      pCtx->resDataInfo.interBufSize = env.calcMemSize;
-    } else if (pExpr->pExpr->nodeType == QUERY_NODE_COLUMN || pExpr->pExpr->nodeType == QUERY_NODE_OPERATOR ||
-               pExpr->pExpr->nodeType == QUERY_NODE_VALUE) {
-      // for simple column, the result buffer needs to hold at least one element.
-      pCtx->resDataInfo.interBufSize = pFunct->resSchema.bytes;
-    }
-
-    pCtx->input.numOfInputCols = pFunct->numOfParams;
-    pCtx->input.pData = taosMemoryCalloc(pFunct->numOfParams, POINTER_BYTES);
-    pCtx->input.pColumnDataAgg = taosMemoryCalloc(pFunct->numOfParams, POINTER_BYTES);
-
-    pCtx->pTsOutput = NULL;
-    pCtx->resDataInfo.bytes = pFunct->resSchema.bytes;
-    pCtx->resDataInfo.type = pFunct->resSchema.type;
-    pCtx->order = TSDB_ORDER_ASC;
-    pCtx->start.key = INT64_MIN;
-    pCtx->end.key = INT64_MIN;
-    pCtx->numOfParams = pExpr->base.numOfParams;
-    pCtx->increase = false;
-
-    pCtx->param = pFunct->pParam;
-  }
-
-  for (int32_t i = 1; i < numOfOutput; ++i) {
-    (*rowCellInfoOffset)[i] =
-        (int32_t)((*rowCellInfoOffset)[i - 1] + sizeof(SResultRowEntryInfo) + pFuncCtx[i - 1].resDataInfo.interBufSize);
-  }
-
-  setSelectValueColumnInfo(pFuncCtx, numOfOutput);
-  return pFuncCtx;
-}
-
-static void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
-  if (pCtx == NULL) {
-    return NULL;
-  }
-
-  for (int32_t i = 0; i < numOfOutput; ++i) {
-    for (int32_t j = 0; j < pCtx[i].numOfParams; ++j) {
-      taosVariantDestroy(&pCtx[i].param[j].param);
-    }
-
-    taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
-    taosMemoryFree(pCtx[i].input.pData);
-    taosMemoryFree(pCtx[i].input.pColumnDataAgg);
-  }
-
-  taosMemoryFreeClear(pCtx);
-  return NULL;
-}
-
 bool isTaskKilled(SExecTaskInfo* pTaskInfo) {
   // query has been executed more than tsShellActivityTimer, and the retrieve has not arrived
   // abort current query execution.
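Both removals above are relocations rather than deletions: `destroySqlFunctionCtx` reappears later in this same file, and the offset loop inside `createSqlFunctionCtx` is the contract that the renamed `getResultEntryInfo` (used throughout this patch) depends on. A sketch of that layout arithmetic, with made-up buffer sizes:

```c
// Each output slot i occupies [SResultRowEntryInfo header | intermediate buffer]
// inside one result row, so offsets accumulate as:
//   offset[0] = 0
//   offset[i] = offset[i-1] + sizeof(SResultRowEntryInfo) + interBufSize(i-1)
// e.g. with two functions whose interBufSizes are 8 and 16 bytes (illustrative):
int32_t offset[2] = {0};
offset[1] = offset[0] + (int32_t)sizeof(SResultRowEntryInfo) + 8;
// getResultEntryInfo(pRow, i, offset) then resolves slot i within the row.
```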
@@ -1568,11 +1389,10 @@ void initResultRow(SResultRow* pResultRow) {
 void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t stage, int32_t numOfExprs,
                              SExecTaskInfo* pTaskInfo) {
   SqlFunctionCtx* pCtx = pInfo->pCtx;
-  SSDataBlock*    pDataBlock = pInfo->pRes;
   int32_t*        rowCellInfoOffset = pInfo->rowCellInfoOffset;
   SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo;
 
-  initResultRowInfo(pResultRowInfo, 16);
+  initResultRowInfo(pResultRowInfo);
 
   int64_t tid = 0;
   int64_t groupId = 0;
@@ -1580,7 +1400,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t
                                              pTaskInfo, false, pSup);
 
   for (int32_t i = 0; i < numOfExprs; ++i) {
-    struct SResultRowEntryInfo* pEntry = getResultCell(pRow, i, rowCellInfoOffset);
+    struct SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, i, rowCellInfoOffset);
     cleanupResultRowEntry(pEntry);
 
     pCtx[i].resultInfo = pEntry;
@@ -1590,42 +1410,6 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t
   initCtxOutputBuffer(pCtx, numOfExprs);
 }
 
-void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t* bufCapacity, int32_t numOfInputRows) {
-  SSDataBlock* pDataBlock = pBInfo->pRes;
-
-  int32_t newSize = pDataBlock->info.rows + numOfInputRows + 5;  // extra output buffer
-  if ((*bufCapacity) < newSize) {
-    for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
-      SColumnInfoData* pColInfo = taosArrayGet(pDataBlock->pDataBlock, i);
-
-      char* p = taosMemoryRealloc(pColInfo->pData, newSize * pColInfo->info.bytes);
-      if (p != NULL) {
-        pColInfo->pData = p;
-
-        // it starts from the tail of the previously generated results.
-        pBInfo->pCtx[i].pOutput = pColInfo->pData;
-        (*bufCapacity) = newSize;
-      } else {
-        // longjmp
-      }
-    }
-  }
-
-  for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) {
-    SColumnInfoData* pColInfo = taosArrayGet(pDataBlock->pDataBlock, i);
-    pBInfo->pCtx[i].pOutput = pColInfo->pData + pColInfo->info.bytes * pDataBlock->info.rows;
-
-    // set the correct pointer after the memory buffer reallocated.
-    int32_t functionId = pBInfo->pCtx[i].functionId;
-#if 0
-    if (functionId == FUNCTION_TOP || functionId == FUNCTION_BOTTOM || functionId == FUNCTION_DIFF ||
-        functionId == FUNCTION_DERIVATIVE) {
-      //      if (i > 0) pBInfo->pCtx[i].pTsOutput = pBInfo->pCtx[i - 1].pOutput;
-    }
-#endif
-  }
-}
-
 void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size) {
   for (int32_t j = 0; j < size; ++j) {
     struct SResultRowEntryInfo* pResInfo = GET_RES_INFO(&pCtx[j]);
@@ -1659,7 +1443,7 @@ void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo) {
 
 void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset) {
   for (int32_t i = 0; i < numOfOutput; ++i) {
-    pCtx[i].resultInfo = getResultCell(pResult, i, rowCellInfoOffset);
+    pCtx[i].resultInfo = getResultEntryInfo(pResult, i, rowCellInfoOffset);
 
     struct SResultRowEntryInfo* pResInfo = pCtx[i].resultInfo;
     if (isRowEntryCompleted(pResInfo) && isRowEntryInitialized(pResInfo)) {
@@ -1793,7 +1577,7 @@ void setExecutionContext(int32_t numOfOutput, uint64_t groupId, SExecTaskInfo* p
 
 static void doUpdateNumOfRows(SResultRow* pRow, int32_t numOfExprs, const int32_t* rowCellOffset) {
   for (int32_t j = 0; j < numOfExprs; ++j) {
-    struct SResultRowEntryInfo* pResInfo = getResultCell(pRow, j, rowCellOffset);
+    struct SResultRowEntryInfo* pResInfo = getResultEntryInfo(pRow, j, rowCellOffset);
     if (!isRowEntryInitialized(pResInfo)) {
       continue;
     }
@@ -1829,7 +1613,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
   for (int32_t j = 0; j < numOfExprs; ++j) {
     int32_t slotId = pExprInfo[j].base.resSchema.slotId;
 
-    pCtx[j].resultInfo = getResultCell(pRow, j, rowCellOffset);
+    pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset);
     if (pCtx[j].fpSet.finalize) {
       int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
       if (TAOS_FAILED(code)) {
@@ -1894,7 +1678,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
     for (int32_t j = 0; j < numOfExprs; ++j) {
       int32_t slotId = pExprInfo[j].base.resSchema.slotId;
 
-      pCtx[j].resultInfo = getResultCell(pRow, j, rowCellOffset);
+      pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset);
       if (pCtx[j].fpSet.finalize) {
         int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
         if (TAOS_FAILED(code)) {
@@ -1946,7 +1730,7 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
   SqlFunctionCtx* pCtx = pbInfo->pCtx;
 
   blockDataCleanup(pBlock);
-  if (!hashRemainDataInGroupInfo(pGroupResInfo)) {
+  if (!hasDataInGroupInfo(pGroupResInfo)) {
     return;
   }
 
@@ -1971,7 +1755,7 @@ static void updateNumOfRowsInResultRows(SqlFunctionCtx* pCtx, int32_t numOfOutpu
       continue;
     }
 
-    SResultRowEntryInfo* pCell = getResultCell(pResult, j, rowCellInfoOffset);
+    SResultRowEntryInfo* pCell = getResultEntryInfo(pResult, j, rowCellInfoOffset);
     pResult->numOfRows = (uint16_t)(TMAX(pResult->numOfRows, pCell->numOfRes));
   }
 }
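The hunks above are mechanical renames (`getResultCell` → `getResultEntryInfo`, `hashRemainDataInGroupInfo` → `hasDataInGroupInfo`); behavior is unchanged. The recurring finalize pattern they touch, as it now reads (error handling elided for brevity):

```c
// Pattern repeated in finalizeResultRowIntoResultDataBlock and doCopyToSDataBlock:
for (int32_t j = 0; j < numOfExprs; ++j) {
  pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset);
  if (pCtx[j].fpSet.finalize) {
    int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
    if (TAOS_FAILED(code)) { /* propagate the error */ }
  }
}
```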
@@ -2413,33 +2197,7 @@ static int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInf
   return TSDB_CODE_SUCCESS;
 }
 
-// NOTE: sources columns are more than the destination SSDatablock columns.
-void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols) {
-  size_t numOfSrcCols = taosArrayGetSize(pCols);
-
-  int32_t i = 0, j = 0;
-  while (i < numOfSrcCols && j < taosArrayGetSize(pColMatchInfo)) {
-    SColumnInfoData* p = taosArrayGet(pCols, i);
-    SColMatchInfo*   pmInfo = taosArrayGet(pColMatchInfo, j);
-    if (!pmInfo->output) {
-      j++;
-      continue;
-    }
-
-    if (p->info.colId == pmInfo->colId) {
-      SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, pmInfo->targetSlotId);
-      colDataAssign(pDst, p, pBlock->info.rows);
-      i++;
-      j++;
-    } else if (p->info.colId < pmInfo->colId) {
-      i++;
-    } else {
-      ASSERT(0);
-    }
-  }
-}
-
-int32_t setDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
+int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
                                  int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
                                  SArray* pColList) {
   if (pColList == NULL) {  // data from other sources
@@ -2565,7 +2323,7 @@ static SSDataBlock* concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SEx
     }
 
     SRetrieveTableRsp* pTableRsp = pDataInfo->pRsp;
-    code = setDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
+    code = extractDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
                                     pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
     if (code != 0) {
       taosMemoryFreeClear(pDataInfo->pRsp);
@@ -2680,7 +2438,7 @@ static SSDataBlock* seqLoadRemoteData(SOperatorInfo* pOperator) {
       SSDataBlock*       pRes = pExchangeInfo->pResult;
       SRetrieveTableRsp* pTableRsp = pDataInfo->pRsp;
       int32_t            code =
-          setDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
+          extractDataBlockFromFetchRsp(pExchangeInfo->pResult, pLoadInfo, pTableRsp->numOfRows, pTableRsp->data,
                                    pTableRsp->compLen, pTableRsp->numOfCols, startTs, &pDataInfo->totalRows, NULL);
 
       if (pRsp->completed == 1) {
@@ -3073,7 +2831,7 @@ static SSDataBlock* doSortedMerge(SOperatorInfo* pOperator) {
   }
 
   int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
-  pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, NULL, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize,
+  pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize,
                                              numOfBufPage, pInfo->binfo.pRes, "GET_TASKID(pTaskInfo)");
 
   tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, NULL, NULL);
@@ -3152,7 +2910,7 @@ SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t
   }
 
   pInfo->binfo.pCtx = createSqlFunctionCtx(pExprInfo, num, &pInfo->binfo.rowCellInfoOffset);
-  initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1);
+  initResultRowInfo(&pInfo->binfo.resultRowInfo);
 
   if (pInfo->binfo.pCtx == NULL || pInfo->binfo.pRes == NULL) {
     goto _error;
@@ -3316,7 +3074,7 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
     blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
     doBuildResultDatablock(pOperator, pInfo, &pAggInfo->groupResInfo, pAggInfo->aggSup.pResultBuf);
-    if (pInfo->pRes->info.rows == 0 || !hashRemainDataInGroupInfo(&pAggInfo->groupResInfo)) {
+    if (pInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pAggInfo->groupResInfo)) {
       doSetOperatorCompleted(pOperator);
     }
 
@@ -3843,9 +3601,8 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
     goto _error;
   }
 
-  int32_t numOfGroup = 10;  // todo replaced with true value
   pInfo->groupId = INT32_MIN;
-  initResultRowInfo(&pInfo->binfo.resultRowInfo, numOfGroup);
+  initResultRowInfo(&pInfo->binfo.resultRowInfo);
 
   pInfo->pScalarExprInfo = pScalarExprInfo;
   pInfo->numOfScalarExpr = numOfScalarExpr;
@@ -3879,6 +3636,25 @@ _error:
   return NULL;
 }
 
+static void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
+  if (pCtx == NULL) {
+    return NULL;
+  }
+
+  for (int32_t i = 0; i < numOfOutput; ++i) {
+    for (int32_t j = 0; j < pCtx[i].numOfParams; ++j) {
+      taosVariantDestroy(&pCtx[i].param[j].param);
+    }
+
+    taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
+    taosMemoryFree(pCtx[i].input.pData);
+    taosMemoryFree(pCtx[i].input.pColumnDataAgg);
+  }
+
+  taosMemoryFreeClear(pCtx);
+  return NULL;
+}
+
 void doDestroyBasicInfo(SOptrBasicInfo* pInfo, int32_t numOfOutput) {
   assert(pInfo != NULL);
 
@@ -3957,23 +3733,27 @@ static SArray* setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOfCols)
   return pList;
 }
 
-SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t num,
-                                         SSDataBlock* pResBlock, SLimit* pLimit, SLimit* pSlimit, SNode* pCondition,
-                                         SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo) {
   SProjectOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SProjectOperatorInfo));
   SOperatorInfo*        pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
   if (pInfo == NULL || pOperator == NULL) {
     goto _error;
   }
 
-  pInfo->limit = *pLimit;
-  pInfo->slimit = *pSlimit;
-  pInfo->curOffset = pLimit->offset;
-  pInfo->curSOffset = pSlimit->offset;
-  pInfo->binfo.pRes = pResBlock;
-  pInfo->pFilterNode = pCondition;
+  int32_t    numOfCols = 0;
+  SExprInfo* pExprInfo = createExprInfo(pProjPhyNode->pProjections, NULL, &numOfCols);
+
+  SSDataBlock* pResBlock = createResDataBlock(pProjPhyNode->node.pOutputDataBlockDesc);
+  SLimit       limit = {.limit = pProjPhyNode->limit, .offset = pProjPhyNode->offset};
+  SLimit       slimit = {.limit = pProjPhyNode->slimit, .offset = pProjPhyNode->soffset};
+
+  pInfo->limit = limit;
+  pInfo->slimit = slimit;
+  pInfo->curOffset = limit.offset;
+  pInfo->curSOffset = slimit.offset;
+  pInfo->binfo.pRes = pResBlock;
+  pInfo->pFilterNode = pProjPhyNode->node.pConditions;
 
-  int32_t numOfCols = num;
   int32_t numOfRows = 4096;
   size_t  keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
 
@@ -3988,14 +3768,14 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SExprInfo* p
   setFunctionResultOutput(&pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfCols, pTaskInfo);
 
   pInfo->pPseudoColInfo = setRowTsColumnOutputInfo(pInfo->binfo.pCtx, numOfCols);
-  pOperator->name = "ProjectOperator";
+  pOperator->name         = "ProjectOperator";
   pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_PROJECT;
-  pOperator->blocking = false;
-  pOperator->status = OP_NOT_OPENED;
-  pOperator->info = pInfo;
-  pOperator->pExpr = pExprInfo;
-  pOperator->numOfExprs = num;
-  pOperator->pTaskInfo = pTaskInfo;
+  pOperator->blocking     = false;
+  pOperator->status       = OP_NOT_OPENED;
+  pOperator->info         = pInfo;
+  pOperator->pExpr        = pExprInfo;
+  pOperator->numOfExprs   = numOfCols;
+  pOperator->pTaskInfo    = pTaskInfo;
 
   pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doProjectOperation, NULL, NULL,
                                          destroyProjectOperatorInfo, NULL, NULL, NULL);
@@ -4162,18 +3942,9 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t
   }
 }
 
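The project-operator factory now takes the physical-plan node itself and derives the expression list, result block, and limits internally. The call-site contrast (both shapes grounded in this patch):

```c
// Before: the caller pre-built every argument.
// pOptr = createProjectOperatorInfo(ops[0], pExprInfo, num, pResBlock, &limit, &slimit,
//                                   pProjPhyNode->node.pConditions, pTaskInfo);
// After: one node pointer; the factory unpacks what it needs.
SOperatorInfo* pOptr = createProjectOperatorInfo(ops[0], (SProjectPhysiNode*)pPhyNode, pTaskInfo);
```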
-SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
-                                      SInterval* pInterval, STimeWindow* pWindow, SSDataBlock* pResBlock,
-                                      int32_t fillType, SNodeListNode* pValueNode, bool multigroupResult,
-                                      SExecTaskInfo* pTaskInfo) {
-  SFillOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SFillOperatorInfo));
-  SOperatorInfo*     pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
-
-  pInfo->pRes = pResBlock;
-  pInfo->multigroupResult = multigroupResult;
-
+static int32_t convertFillType(int32_t mode) {
   int32_t type = TSDB_FILL_NONE;
-  switch (fillType) {
+  switch (mode) {
     case FILL_MODE_PREV:
       type = TSDB_FILL_PREV;
       break;
@@ -4196,26 +3967,46 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExp
       type = TSDB_FILL_NONE;
   }
 
+  return type;
+}
+
+SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, bool multigroupResult,
+                                      SExecTaskInfo* pTaskInfo) {
+  SFillOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SFillOperatorInfo));
+  SOperatorInfo*     pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+  if (pInfo == NULL || pOperator == NULL) {
+    goto _error;
+  }
+
+  int32_t      num = 0;
+  SSDataBlock* pResBlock = createResDataBlock(pPhyFillNode->node.pOutputDataBlockDesc);
+  SExprInfo*   pExprInfo = createExprInfo(pPhyFillNode->pTargets, NULL, &num);
+  SInterval*   pInterval = &((SIntervalAggOperatorInfo*)downstream->info)->interval;
+  int32_t      type = convertFillType(pPhyFillNode->mode);
+
   SResultInfo* pResultInfo = &pOperator->resultInfo;
   initResultSizeInfo(pOperator, 4096);
 
-  int32_t code = initFillInfo(pInfo, pExpr, numOfCols, pValueNode, *pWindow, pResultInfo->capacity, pTaskInfo->id.str,
-                              pInterval, type);
+  int32_t code = initFillInfo(pInfo, pExprInfo, num, (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange,
+                              pResultInfo->capacity, pTaskInfo->id.str, pInterval, type);
   if (code != TSDB_CODE_SUCCESS) {
     goto _error;
   }
 
-  pOperator->name = "FillOperator";
-  pOperator->blocking = false;
-  pOperator->status = OP_NOT_OPENED;
+  pInfo->pRes = pResBlock;
+  pInfo->multigroupResult = multigroupResult;
+  pOperator->name         = "FillOperator";
+  pOperator->blocking     = false;
+  pOperator->status       = OP_NOT_OPENED;
   pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_FILL;
-  pOperator->pExpr = pExpr;
-  pOperator->numOfExprs = numOfCols;
-  pOperator->info = pInfo;
+  pOperator->pExpr        = pExprInfo;
+  pOperator->numOfExprs   = num;
+  pOperator->info         = pInfo;
+  pOperator->pTaskInfo    = pTaskInfo;
 
   pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doFill, NULL, NULL, destroySFillOperatorInfo, NULL,
                                          NULL, NULL);
-  pOperator->pTaskInfo = pTaskInfo;
+
   code = appendDownstream(pOperator, &downstream, 1);
   return pOperator;
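Extracting `convertFillType` leaves the constructor with a single call where the switch used to be inlined, with `TSDB_FILL_NONE` remaining the fallback for unrecognized modes:

```c
// FILL_MODE_PREV -> TSDB_FILL_PREV, unknown modes -> TSDB_FILL_NONE, etc.
int32_t type = convertFillType(pPhyFillNode->mode);
```

Note the rewritten constructor also assumes its downstream is an interval operator (`((SIntervalAggOperatorInfo*)downstream->info)->interval`), which is worth keeping in mind if fill is ever planned over a different child.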
@@ -4225,151 +4016,6 @@ _error:
   return NULL;
 }
 
-static SResSchema createResSchema(int32_t type, int32_t bytes, int32_t slotId, int32_t scale, int32_t precision,
-                                  const char* name) {
-  SResSchema s = {0};
-  s.scale = scale;
-  s.type = type;
-  s.bytes = bytes;
-  s.slotId = slotId;
-  s.precision = precision;
-  strncpy(s.name, name, tListLen(s.name));
-
-  return s;
-}
-
-static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType) {
-  SColumn* pCol = taosMemoryCalloc(1, sizeof(SColumn));
-  if (pCol == NULL) {
-    terrno = TSDB_CODE_OUT_OF_MEMORY;
-    return NULL;
-  }
-
-  pCol->slotId = slotId;
-  pCol->colId = colId;
-  pCol->bytes = pType->bytes;
-  pCol->type = pType->type;
-  pCol->scale = pType->scale;
-  pCol->precision = pType->precision;
-  pCol->dataBlockId = blockId;
-
-  return pCol;
-}
-
-SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs) {
-  int32_t numOfFuncs = LIST_LENGTH(pNodeList);
-  int32_t numOfGroupKeys = 0;
-  if (pGroupKeys != NULL) {
-    numOfGroupKeys = LIST_LENGTH(pGroupKeys);
-  }
-
-  *numOfExprs = numOfFuncs + numOfGroupKeys;
-  SExprInfo* pExprs = taosMemoryCalloc(*numOfExprs, sizeof(SExprInfo));
-
-  for (int32_t i = 0; i < (*numOfExprs); ++i) {
-    STargetNode* pTargetNode = NULL;
-    if (i < numOfFuncs) {
-      pTargetNode = (STargetNode*)nodesListGetNode(pNodeList, i);
-    } else {
-      pTargetNode = (STargetNode*)nodesListGetNode(pGroupKeys, i - numOfFuncs);
-    }
-
-    SExprInfo* pExp = &pExprs[i];
-
-    pExp->pExpr = taosMemoryCalloc(1, sizeof(tExprNode));
-    pExp->pExpr->_function.num = 1;
-    pExp->pExpr->_function.functionId = -1;
-
-    int32_t type = nodeType(pTargetNode->pExpr);
-    // it is a project query, or group by column
-    if (type == QUERY_NODE_COLUMN) {
-      pExp->pExpr->nodeType = QUERY_NODE_COLUMN;
-      SColumnNode* pColNode = (SColumnNode*)pTargetNode->pExpr;
-
-      pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
-      pExp->base.numOfParams = 1;
-
-      SDataType* pType = &pColNode->node.resType;
-      pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
-                                             pType->precision, pColNode->colName);
-      pExp->base.pParam[0].pCol = createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType);
-      pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN;
-    } else if (type == QUERY_NODE_VALUE) {
-      pExp->pExpr->nodeType = QUERY_NODE_VALUE;
-      SValueNode* pValNode = (SValueNode*)pTargetNode->pExpr;
-
-      pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
-      pExp->base.numOfParams = 1;
-
-      SDataType* pType = &pValNode->node.resType;
-      pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
-                                             pType->precision, pValNode->node.aliasName);
-      pExp->base.pParam[0].type = FUNC_PARAM_TYPE_VALUE;
-      nodesValueNodeToVariant(pValNode, &pExp->base.pParam[0].param);
-    } else if (type == QUERY_NODE_FUNCTION) {
-      pExp->pExpr->nodeType = QUERY_NODE_FUNCTION;
-      SFunctionNode* pFuncNode = (SFunctionNode*)pTargetNode->pExpr;
-
-      SDataType* pType = &pFuncNode->node.resType;
-      pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
-                                             pType->precision, pFuncNode->node.aliasName);
-
-      pExp->pExpr->_function.functionId = pFuncNode->funcId;
-      pExp->pExpr->_function.pFunctNode = pFuncNode;
-
-      strncpy(pExp->pExpr->_function.functionName, pFuncNode->functionName,
-              tListLen(pExp->pExpr->_function.functionName));
-#if 1
-      // todo refactor: add the parameter for tbname function
-      if (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0) {
-        pFuncNode->pParameterList = nodesMakeList();
-        ASSERT(LIST_LENGTH(pFuncNode->pParameterList) == 0);
-        SValueNode* res = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
-        if (NULL == res) {  // todo handle error
-        } else {
-          res->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};
-          nodesListAppend(pFuncNode->pParameterList, (SNode*)res);
-        }
-      }
-#endif
-
-      int32_t numOfParam = LIST_LENGTH(pFuncNode->pParameterList);
-
-      pExp->base.pParam = taosMemoryCalloc(numOfParam, sizeof(SFunctParam));
-      pExp->base.numOfParams = numOfParam;
-
-      for (int32_t j = 0; j < numOfParam; ++j) {
-        SNode* p1 = nodesListGetNode(pFuncNode->pParameterList, j);
-        if (p1->type == QUERY_NODE_COLUMN) {
-          SColumnNode* pcn = (SColumnNode*)p1;
-
-          pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN;
-          pExp->base.pParam[j].pCol = createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType);
-        } else if (p1->type == QUERY_NODE_VALUE) {
-          SValueNode* pvn = (SValueNode*)p1;
-          pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE;
-          nodesValueNodeToVariant(pvn, &pExp->base.pParam[j].param);
-        }
-      }
-    } else if (type == QUERY_NODE_OPERATOR) {
-      pExp->pExpr->nodeType = QUERY_NODE_OPERATOR;
-      SOperatorNode* pNode = (SOperatorNode*)pTargetNode->pExpr;
-
-      pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
-      pExp->base.numOfParams = 1;
-
-      SDataType* pType = &pNode->node.resType;
-      pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
-                                             pType->precision, pNode->node.aliasName);
-      pExp->pExpr->_optrRoot.pRootNode = pTargetNode->pExpr;
-    } else {
-      ASSERT(0);
-    }
-  }
-
-  return pExprs;
-}
-
 static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPTR_EXEC_MODEL model, char* dbFName) {
   SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
   setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
@@ -4392,8 +4038,6 @@ static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SRead
 
 static SArray* extractColumnInfo(SNodeList* pNodeList);
 
-static SArray* createSortInfo(SNodeList* pNodeList);
-
 int32_t extractTableSchemaVersion(SReadHandle* pHandle, uint64_t uid, SExecTaskInfo* pTaskInfo) {
   SMetaReader mr = {0};
   metaReaderInit(&mr, pHandle->meta, 0);
@@ -4560,7 +4204,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
       if (pHandle->vnode) {
         pDataReader = doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
       } else {
-        getTableList(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableListInfo, pTagCond);
+        getTableList(pHandle->meta, pScanPhyNode, pTableListInfo, pTagCond);
       }
 
       if (pDataReader == NULL && terrno != 0) {
@@ -4587,9 +4231,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
     } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) {
       STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode;
 
-      int32_t code = getTableList(pHandle->meta, pScanPhyNode->tableType, pScanPhyNode->uid, pTableListInfo,
-                                  pScanPhyNode->node.pConditions);
+      int32_t code = getTableList(pHandle->meta, pScanPhyNode, pTableListInfo, pScanPhyNode->node.pConditions);
       if (code != TSDB_CODE_SUCCESS) {
+        pTaskInfo->code = terrno;
         return NULL;
       }
 
@@ -4613,14 +4257,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
 
   SOperatorInfo* pOptr = NULL;
   if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) {
-    SProjectPhysiNode* pProjPhyNode = (SProjectPhysiNode*)pPhyNode;
-    SExprInfo*         pExprInfo = createExprInfo(pProjPhyNode->pProjections, NULL, &num);
-
-    SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
-    SLimit       limit = {.limit = pProjPhyNode->limit, .offset = pProjPhyNode->offset};
-    SLimit       slimit = {.limit = pProjPhyNode->slimit, .offset = pProjPhyNode->soffset};
-    pOptr = createProjectOperatorInfo(ops[0], pExprInfo, num, pResBlock, &limit, &slimit,
-                                      pProjPhyNode->node.pConditions, pTaskInfo);
+    pOptr = createProjectOperatorInfo(ops[0], (SProjectPhysiNode*)pPhyNode, pTaskInfo);
   } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_AGG == type) {
     SAggPhysiNode* pAggNode = (SAggPhysiNode*)pPhyNode;
     SExprInfo*     pExprInfo = createExprInfo(pAggNode->pAggFuncs, pAggNode->pGroupKeys, &num);
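The same signature-narrowing recurs across `createOperatorTree`: each branch now forwards the physical-plan node and task info, and the factory unpacks what it needs. The emerging dispatch shape (branches taken verbatim from this patch):

```c
// Sketch of the post-refactor dispatch in createOperatorTree:
if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) {
  pOptr = createProjectOperatorInfo(ops[0], (SProjectPhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) {
  pOptr = createSortOperatorInfo(ops[0], (SSortPhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) {
  pOptr = createMergeJoinOperatorInfo(ops, size, (SJoinPhysiNode*)pPhyNode, pTaskInfo);
}
```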
@@ -4687,21 +4324,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
       int32_t children = 1;
       pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
     } else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) {
-      SSortPhysiNode* pSortPhyNode = (SSortPhysiNode*)pPhyNode;
-
-      SDataBlockDescNode* pDescNode = pPhyNode->pOutputDataBlockDesc;
-
-      SSDataBlock* pResBlock = createResDataBlock(pDescNode);
-      SArray*      info = createSortInfo(pSortPhyNode->pSortKeys);
-
-      int32_t    numOfCols = 0;
-      SExprInfo* pExprInfo = createExprInfo(pSortPhyNode->pExprs, NULL, &numOfCols);
-
-      int32_t numOfOutputCols = 0;
-      SArray* pColList =
-          extractColMatchInfo(pSortPhyNode->pTargets, pDescNode, &numOfOutputCols, pTaskInfo, COL_MATCH_FROM_SLOT_ID);
-
-      pOptr = createSortOperatorInfo(ops[0], pResBlock, info, pExprInfo, numOfCols, pColList, pTaskInfo);
+      pOptr = createSortOperatorInfo(ops[0], (SSortPhysiNode*)pPhyNode, pTaskInfo);
     } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE == type) {
       SMergePhysiNode* pMergePhyNode = (SMergePhysiNode*)pPhyNode;
 
@@ -4711,7 +4334,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
       SArray* sortInfo = createSortInfo(pMergePhyNode->pMergeKeys);
       int32_t numOfOutputCols = 0;
       SArray* pColList =
-          extractColMatchInfo(pMergePhyNode->pTargets, pDescNode, &numOfOutputCols, pTaskInfo, COL_MATCH_FROM_SLOT_ID);
+          extractColMatchInfo(pMergePhyNode->pTargets, pDescNode, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID);
       SPhysiNode*  pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, 0);
       SSDataBlock* pInputDataBlock = createResDataBlock(pChildNode->pOutputDataBlockDesc);
       pOptr = createMultiwaySortMergeOperatorInfo(ops, size, pInputDataBlock, pResBlock, sortInfo, pColList, pTaskInfo);
@@ -4728,18 +4351,13 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
       pOptr = createSessionAggOperatorInfo(ops[0], pExprInfo, num, pResBlock, pSessionNode->gap, tsSlotId, &as,
                                            pTaskInfo);
     } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION == type) {
-      SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode;
-
-      STimeWindowAggSupp as = {.waterMark = pSessionNode->window.watermark,
-                               .calTrigger = pSessionNode->window.triggerType};
-
-      SExprInfo*   pExprInfo = createExprInfo(pSessionNode->window.pFuncs, NULL, &num);
-      SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
-      int32_t      tsSlotId = ((SColumnNode*)pSessionNode->window.pTspk)->slotId;
-
-      pOptr = createStreamSessionAggOperatorInfo(ops[0], pExprInfo, num, pResBlock, pSessionNode->gap, tsSlotId, &as,
-                                                 pTaskInfo);
-
+      pOptr = createStreamSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
+    } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION == type) {
+      int32_t children = 0;
+      pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
+    } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION == type) {
+      int32_t children = 1;
+      pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
     } else if (QUERY_NODE_PHYSICAL_PLAN_PARTITION == type) {
       pOptr = createPartitionOperatorInfo(ops[0], (SPartitionPhysiNode*)pPhyNode, pTaskInfo);
     } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE == type) {
@@ -4757,19 +4375,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
     } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) {
       pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
     } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) {
-      SJoinPhysiNode* pJoinNode = (SJoinPhysiNode*)pPhyNode;
-      SSDataBlock*    pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
-
-      SExprInfo* pExprInfo = createExprInfo(pJoinNode->pTargets, NULL, &num);
-      pOptr = createMergeJoinOperatorInfo(ops, size, pExprInfo, num, pResBlock, pJoinNode->pOnConditions, pTaskInfo);
+      pOptr = createMergeJoinOperatorInfo(ops, size, (SJoinPhysiNode*)pPhyNode, pTaskInfo);
     } else if (QUERY_NODE_PHYSICAL_PLAN_FILL == type) {
-      SFillPhysiNode* pFillNode = (SFillPhysiNode*)pPhyNode;
-      SSDataBlock*    pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
-      SExprInfo*      pExprInfo = createExprInfo(pFillNode->pTargets, NULL, &num);
-
-      SInterval* pInterval = &((SIntervalAggOperatorInfo*)ops[0]->info)->interval;
-      pOptr = createFillOperatorInfo(ops[0], pExprInfo, num, pInterval, &pFillNode->timeRange, pResBlock, pFillNode->mode,
-                                     (SNodeListNode*)pFillNode->pValues, false, pTaskInfo);
+      pOptr = createFillOperatorInfo(ops[0], (SFillPhysiNode*)pPhyNode, false, pTaskInfo);
     } else if (QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC == type) {
       pOptr = createIndefinitOutputOperatorInfo(ops[0], pPhyNode, pTaskInfo);
     } else {
@@ -4792,79 +4400,6 @@ int32_t compareTimeWindow(const void* p1, const void* p2, const void* param) {
   return 0;
 }
 
-int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysiNode* pTableScanNode) {
-  pCond->loadExternalRows = false;
-
-  pCond->order = pTableScanNode->scanSeq[0] > 0 ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
-  pCond->numOfCols = LIST_LENGTH(pTableScanNode->scan.pScanCols);
-  pCond->colList = taosMemoryCalloc(pCond->numOfCols, sizeof(SColumnInfo));
-  if (pCond->colList == NULL) {
-    terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
-    return terrno;
-  }
-
-  // pCond->twindow = pTableScanNode->scanRange;
-  // TODO: get it from stable scan node
-  pCond->numOfTWindows = 1;
-  pCond->twindows = taosMemoryCalloc(pCond->numOfTWindows, sizeof(STimeWindow));
-  pCond->twindows[0] = pTableScanNode->scanRange;
-  pCond->suid = pTableScanNode->scan.suid;
-
-#if 1
-  // todo work around a problem, remove it later
-  for (int32_t i = 0; i < pCond->numOfTWindows; ++i) {
-    if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) ||
-        (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) {
-      TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
-    }
-  }
-#endif
-
-  for (int32_t i = 0; i < pCond->numOfTWindows; ++i) {
-    if ((pCond->order == TSDB_ORDER_ASC && pCond->twindows[i].skey > pCond->twindows[i].ekey) ||
-        (pCond->order == TSDB_ORDER_DESC && pCond->twindows[i].skey < pCond->twindows[i].ekey)) {
-      TSWAP(pCond->twindows[i].skey, pCond->twindows[i].ekey);
-    }
-  }
-  taosqsort(pCond->twindows, pCond->numOfTWindows, sizeof(STimeWindow), pCond, compareTimeWindow);
-
-  pCond->type = BLOCK_LOAD_OFFSET_SEQ_ORDER;
-  // pCond->type = pTableScanNode->scanFlag;
-
-  int32_t j = 0;
-  for (int32_t i = 0; i < pCond->numOfCols; ++i) {
-    STargetNode* pNode = (STargetNode*)nodesListGetNode(pTableScanNode->scan.pScanCols, i);
-    SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
-    if (pColNode->colType == COLUMN_TYPE_TAG) {
-      continue;
-    }
-
-    pCond->colList[j].type = pColNode->node.resType.type;
-    pCond->colList[j].bytes = pColNode->node.resType.bytes;
-    pCond->colList[j].colId = pColNode->colId;
-    j += 1;
-  }
-
-  pCond->numOfCols = j;
-  return TSDB_CODE_SUCCESS;
-}
-
-void clearupQueryTableDataCond(SQueryTableDataCond* pCond) {
-  taosMemoryFree(pCond->twindows);
-  taosMemoryFree(pCond->colList);
-}
-
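With this removal, `initQueryTableDataCond` lives only at its new home shown at the top of this patch, and the misspelled `clearupQueryTableDataCond` survives solely as the renamed `cleanupQueryTableDataCond`. The intended pairing at a call site such as `doCreateDataReader` (sketch, error path elided):

```c
SQueryTableDataCond cond = {0};
if (initQueryTableDataCond(&cond, pTableScanNode) == TSDB_CODE_SUCCESS) {
  // ... hand &cond to tsdbReaderOpen(...) ...
  cleanupQueryTableDataCond(&cond);  // frees cond.twindows and cond.colList
}
```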
-SColumn extractColumnFromColumnNode(SColumnNode* pColNode) {
-  SColumn c = {0};
-  c.slotId = pColNode->slotId;
-  c.colId = pColNode->colId;
-  c.type = pColNode->node.resType.type;
-  c.bytes = pColNode->node.resType.bytes;
-  c.scale = pColNode->node.resType.scale;
-  c.precision = pColNode->node.resType.precision;
-  return c;
-}
-
 SArray* extractColumnInfo(SNodeList* pNodeList) {
   size_t  numOfCols = LIST_LENGTH(pNodeList);
   SArray* pList = taosArrayInit(numOfCols, sizeof(SColumn));
@@ -4886,7 +4421,7 @@ SArray* extractColumnInfo(SNodeList* pNodeList) {
       SColumn c = {0};
       c.slotId = pNode->slotId;
       c.colId = pNode->slotId;
-      c.type      = pValNode->node.type;
+      c.type = pValNode->node.type;
       c.bytes = pValNode->node.resType.bytes;
       c.scale = pValNode->node.resType.scale;
       c.precision = pValNode->node.resType.precision;
@@ -4898,146 +4433,10 @@ SArray* extractColumnInfo(SNodeList* pNodeList) {
   return pList;
 }
 
-SArray* extractPartitionColInfo(SNodeList* pNodeList) {
-  if (!pNodeList) {
-    return NULL;
-  }
-
-  size_t  numOfCols = LIST_LENGTH(pNodeList);
-  SArray* pList = taosArrayInit(numOfCols, sizeof(SColumn));
-  if (pList == NULL) {
-    terrno = TSDB_CODE_OUT_OF_MEMORY;
-    return NULL;
-  }
-
-  for (int32_t i = 0; i < numOfCols; ++i) {
-    SColumnNode* pColNode = (SColumnNode*)nodesListGetNode(pNodeList, i);
-
-    // todo extract method
-    SColumn c = {0};
-    c.slotId = pColNode->slotId;
-    c.colId = pColNode->colId;
-    c.type = pColNode->node.resType.type;
-    c.bytes = pColNode->node.resType.bytes;
-    c.precision = pColNode->node.resType.precision;
-    c.scale = pColNode->node.resType.scale;
-
-    taosArrayPush(pList, &c);
-  }
-
-  return pList;
-}
-
-SArray* createSortInfo(SNodeList* pNodeList) {
-  size_t  numOfCols = LIST_LENGTH(pNodeList);
-  SArray* pList = taosArrayInit(numOfCols, sizeof(SBlockOrderInfo));
-  if (pList == NULL) {
-    terrno = TSDB_CODE_OUT_OF_MEMORY;
-    return pList;
-  }
-
-  for (int32_t i = 0; i < numOfCols; ++i) {
-    SOrderByExprNode* pSortKey = (SOrderByExprNode*)nodesListGetNode(pNodeList, i);
-    SBlockOrderInfo   bi = {0};
-    bi.order = (pSortKey->order == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
-    bi.nullFirst = (pSortKey->nullOrder == NULL_ORDER_FIRST);
-
-    SColumnNode* pColNode = (SColumnNode*)pSortKey->pExpr;
-    bi.slotId = pColNode->slotId;
-    taosArrayPush(pList, &bi);
-  }
-
-  return pList;
-}
-
-SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols,
-                            SExecTaskInfo* pTaskInfo, int32_t type) {
-  size_t  numOfCols = LIST_LENGTH(pNodeList);
-  SArray* pList = taosArrayInit(numOfCols, sizeof(SColMatchInfo));
-  if (pList == NULL) {
-    terrno = TSDB_CODE_OUT_OF_MEMORY;
-    return NULL;
-  }
-
-  for (int32_t i = 0; i < numOfCols; ++i) {
-    STargetNode* pNode = (STargetNode*)nodesListGetNode(pNodeList, i);
-    SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
-
-    SColMatchInfo c = {0};
-    c.output = true;
-    c.colId = pColNode->colId;
-    c.srcSlotId = pColNode->slotId;
-    c.matchType = type;
-    c.targetSlotId = pNode->slotId;
-    taosArrayPush(pList, &c);
-  }
-
-  *numOfOutputCols = 0;
-  int32_t num = LIST_LENGTH(pOutputNodeList->pSlots);
-  for (int32_t i = 0; i < num; ++i) {
-    SSlotDescNode* pNode = (SSlotDescNode*)nodesListGetNode(pOutputNodeList->pSlots, i);
-
-    // todo: add reserve flag check
-    // it is a column reserved for the arithmetic expression calculation
-    if (pNode->slotId >= numOfCols) {
-      (*numOfOutputCols) += 1;
-      continue;
-    }
-
-    SColMatchInfo* info = taosArrayGet(pList, pNode->slotId);
-    if (pNode->output) {
-      (*numOfOutputCols) += 1;
-    } else {
-      info->output = false;
-    }
-  }
-
-  return pList;
-}
-
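`createSortInfo` and `extractColMatchInfo` are likewise moved rather than dropped; the relocated `extractColMatchInfo` also loses its unused `SExecTaskInfo*` parameter, so callers shrink to four arguments, as seen throughout the rest of this patch:

```c
int32_t numOfOutputCols = 0;
SArray* pColList = extractColMatchInfo(pSortPhyNode->pTargets, pDescNode,
                                       &numOfOutputCols, COL_MATCH_FROM_SLOT_ID);
```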
-int32_t getTableList(void* metaHandle, int32_t tableType, uint64_t tableUid, STableListInfo* pListInfo,
-                     SNode* pTagCond) {
-  int32_t code = TSDB_CODE_SUCCESS;
-  pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
-
-  if (tableType == TSDB_SUPER_TABLE) {
-    if (pTagCond) {
-      SIndexMetaArg metaArg = {
-          .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid};
-
-      SArray* res = taosArrayInit(8, sizeof(uint64_t));
-      code = doFilterTag(pTagCond, &metaArg, res);
-      if (code == TSDB_CODE_INDEX_REBUILDING) {  // todo
-        // doFilter();
-      } else if (code != TSDB_CODE_SUCCESS) {
-        qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid);
-        taosArrayDestroy(res);
-        terrno = code;
-        return code;
-      } else {
-        qDebug("sucess to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid);
-      }
-
-      for (int i = 0; i < taosArrayGetSize(res); i++) {
-        STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i)};
-        taosArrayPush(pListInfo->pTableList, &info);
-      }
-      taosArrayDestroy(res);
-    } else {
-      code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
-    }
-  } else {  // Create one table group.
-    STableKeyInfo info = {.lastKey = 0, .uid = tableUid};
-    taosArrayPush(pListInfo->pTableList, &info);
-  }
-
-  return code;
-}
-
 tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
                                STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) {
   int32_t code =
-      getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond);
+      getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo, pTagCond);
   if (code != TSDB_CODE_SUCCESS) {
     goto _error;
   }
@@ -5055,7 +4454,7 @@ tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle*
   }
 
   tsdbReaderT* pReader = tsdbReaderOpen(pHandle->vnode, &cond, pTableListInfo, queryId, taskId);
-  clearupQueryTableDataCond(&cond);
+  cleanupQueryTableDataCond(&cond);
 
   return pReader;
 
@@ -5116,15 +4515,17 @@ int32_t encodeOperator(SOperatorInfo* ops, char** result, int32_t* length) {
   return TDB_CODE_SUCCESS;
 }
 
-int32_t decodeOperator(SOperatorInfo* ops, char* result, int32_t length) {
+int32_t decodeOperator(SOperatorInfo* ops, const char* result, int32_t length) {
   int32_t code = TDB_CODE_SUCCESS;
   if (ops->fpSet.decodeResultRow) {
     if (result == NULL) {
       return TSDB_CODE_TSC_INVALID_INPUT;
     }
+
     ASSERT(length == *(int32_t*)result);
-    char* data = result + sizeof(int32_t);
-    code = ops->fpSet.decodeResultRow(ops, data);
+
+    const char* data = result + sizeof(int32_t);
+    code = ops->fpSet.decodeResultRow(ops, (char*)data);
     if (code != TDB_CODE_SUCCESS) {
       return code;
    }
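The `decodeOperator` change is const-correctness plus a sanity check against the length prefix written by `encodeOperator`. The buffer layout the ASSERT implies:

```c
// Serialized operator state: [ int32_t totalLen | encoded result-row payload ]
// decodeOperator verifies length == *(int32_t*)result, then skips the prefix:
const char* data = result + sizeof(int32_t);
```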
pTaskInfo->id.str); - initResultRowInfo(&pInfo->binfo.resultRowInfo, 8); + initResultRowInfo(&pInfo->binfo.resultRowInfo); pOperator->name = "GroupbyAggOperator"; pOperator->blocking = true; @@ -738,4 +740,18 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition taosMemoryFreeClear(pInfo); taosMemoryFreeClear(pOperator); return NULL; +} + +int32_t setGroupResultOutputBuf(SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t type, int16_t bytes, + int32_t groupId, SDiskbasedBuf* pBuf, SExecTaskInfo* pTaskInfo, + SAggSupporter* pAggSup) { + SResultRowInfo* pResultRowInfo = &binfo->resultRowInfo; + SqlFunctionCtx* pCtx = binfo->pCtx; + + SResultRow* pResultRow = + doSetResultOutBufByKey(pBuf, pResultRowInfo, (char*)pData, bytes, true, groupId, pTaskInfo, false, pAggSup); + assert(pResultRow != NULL); + + setResultRowInitCtx(pResultRow, pCtx, numOfCols, binfo->rowCellInfoOffset); + return TSDB_CODE_SUCCESS; } \ No newline at end of file diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index 7c8ab244a1..6ac3f1a16c 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -28,27 +28,32 @@ static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator); static void destroyMergeJoinOperator(void* param, int32_t numOfOutput); static void extractTimeCondition(SJoinOperatorInfo* Info, SLogicConditionNode* pLogicConditionNode); -SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SExprInfo* pExprInfo, - int32_t numOfCols, SSDataBlock* pResBlock, SNode* pOnCondition, - SExecTaskInfo* pTaskInfo) { +SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SJoinPhysiNode* pJoinNode, + SExecTaskInfo* pTaskInfo) { SJoinOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SJoinOperatorInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pOperator == NULL || pInfo == NULL) { goto _error; } + SSDataBlock* pResBlock = createResDataBlock(pJoinNode->node.pOutputDataBlockDesc); + + int32_t numOfCols = 0; + SExprInfo* pExprInfo = createExprInfo(pJoinNode->pTargets, NULL, &numOfCols); + initResultSizeInfo(pOperator, 4096); - pInfo->pRes = pResBlock; - pOperator->name = "MergeJoinOperator"; + pInfo->pRes = pResBlock; + pOperator->name = "MergeJoinOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN; - pOperator->blocking = false; - pOperator->status = OP_NOT_OPENED; - pOperator->pExpr = pExprInfo; - pOperator->numOfExprs = numOfCols; - pOperator->info = pInfo; - pOperator->pTaskInfo = pTaskInfo; + pOperator->blocking = false; + pOperator->status = OP_NOT_OPENED; + pOperator->pExpr = pExprInfo; + pOperator->numOfExprs = numOfCols; + pOperator->info = pInfo; + pOperator->pTaskInfo = pTaskInfo; + SNode* pOnCondition = pJoinNode->pOnConditions; if (nodeType(pOnCondition) == QUERY_NODE_OPERATOR) { SOperatorNode* pNode = (SOperatorNode*)pOnCondition; setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pNode->pLeft); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 3210d64901..83f8e31ea8 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -496,18 +496,6 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { return NULL; } -SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) { - SInterval interval = { - .interval = pTableScanNode->interval, - 
-      .sliding = pTableScanNode->sliding,
-      .intervalUnit = pTableScanNode->intervalUnit,
-      .slidingUnit = pTableScanNode->slidingUnit,
-      .offset = pTableScanNode->offset,
-  };
-
-  return interval;
-}
-
 static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len) {
   SFileBlockLoadRecorder* pRecorder = taosMemoryCalloc(1, sizeof(SFileBlockLoadRecorder));
   STableScanInfo*         pTableScanInfo = pOptr->info;
@@ -520,7 +508,7 @@ static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptr
 static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) {
   STableScanInfo* pTableScanInfo = (STableScanInfo*)param;
   blockDataDestroy(pTableScanInfo->pResBlock);
-  clearupQueryTableDataCond(&pTableScanInfo->cond);
+  cleanupQueryTableDataCond(&pTableScanInfo->cond);
 
   tsdbCleanupReadHandle(pTableScanInfo->dataReader);
 
@@ -537,11 +525,12 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
     goto _error;
   }
 
+  //taosSsleep(20);
+
   SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc;
 
   int32_t numOfCols = 0;
-  SArray* pColList =
-      extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
+  SArray* pColList = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
 
   int32_t code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
   if (code != TSDB_CODE_SUCCESS) {
@@ -1064,8 +1053,7 @@ SOperatorInfo* createStreamScanOperatorInfo(void* pDataReader, SReadHandle* pHan
   STableScanInfo* pSTInfo = (STableScanInfo*)pTableScanDummy->info;
 
   int32_t numOfCols = 0;
-  pInfo->pColMatchInfo =
-      extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
+  pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
 
   int32_t numOfOutput = taosArrayGetSize(pInfo->pColMatchInfo);
   SArray* pColIds = taosArrayInit(numOfOutput, sizeof(int16_t));
@@ -1545,7 +1533,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
         }
       }
 
-      setDataBlockFromFetchRsp(pInfo->pRes, &pInfo->loadInfo, pRsp->numOfRows, pRsp->data, pRsp->compLen,
+      extractDataBlockFromFetchRsp(pInfo->pRes, &pInfo->loadInfo, pRsp->numOfRows, pRsp->data, pRsp->compLen,
                                pOperator->numOfExprs, startTs, NULL, pInfo->scanCols);
 
       // todo log the filter info
@@ -1634,7 +1622,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
   SSDataBlock* pResBlock = createResDataBlock(pDescNode);
 
   int32_t num = 0;
-  SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, pTaskInfo, COL_MATCH_FROM_COL_ID);
+  SArray* colList = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &num, COL_MATCH_FROM_COL_ID);
 
   pInfo->accountId = pScanPhyNode->accountId;
   pInfo->showRewrite = pScanPhyNode->showRewrite;
@@ -1840,27 +1828,26 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
 
   SDataBlockDescNode* pDescNode = pPhyNode->node.pOutputDataBlockDesc;
 
+  int32_t    num = 0;
   int32_t    numOfExprs = 0;
   SExprInfo* pExprInfo = createExprInfo(pPhyNode->pScanPseudoCols, NULL, &numOfExprs);
+  SArray*    colList = extractColMatchInfo(pPhyNode->pScanPseudoCols, pDescNode, &num, COL_MATCH_FROM_COL_ID);
 
-  int32_t num = 0;
-  SArray* colList = extractColMatchInfo(pPhyNode->pScanPseudoCols, pDescNode, &num, pTaskInfo, COL_MATCH_FROM_COL_ID);
+  pInfo->pTableList = pTableListInfo;
+  pInfo->pColMatchInfo = colList;
+  pInfo->pRes = createResDataBlock(pDescNode);
+  pInfo->readHandle = *pReadHandle;
+  pInfo->curPos = 0;
+  pInfo->pFilterNode = pPhyNode->node.pConditions;
 
-  pInfo->pTableList = pTableListInfo;
-  pInfo->pColMatchInfo = colList;
-  pInfo->pRes = createResDataBlock(pDescNode);
-  ;
-  pInfo->readHandle = *pReadHandle;
-  pInfo->curPos = 0;
-  pInfo->pFilterNode = pPhyNode->node.pConditions;
-  pOperator->name = "TagScanOperator";
+  pOperator->name         = "TagScanOperator";
   pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN;
-  pOperator->blocking = false;
-  pOperator->status = OP_NOT_OPENED;
-  pOperator->info = pInfo;
-  pOperator->pExpr = pExprInfo;
-  pOperator->numOfExprs = numOfExprs;
-  pOperator->pTaskInfo = pTaskInfo;
+  pOperator->blocking     = false;
+  pOperator->status       = OP_NOT_OPENED;
+  pOperator->info         = pInfo;
+  pOperator->pExpr        = pExprInfo;
+  pOperator->numOfExprs   = numOfExprs;
+  pOperator->pTaskInfo    = pTaskInfo;
 
   initResultSizeInfo(pOperator, 4096);
   blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
@@ -1930,7 +1917,7 @@ int32_t createMultipleDataReaders(STableScanPhysiNode* pTableScanNode, SReadHand
                                   STableListInfo* pTableListInfo, SArray* arrayReader, uint64_t queryId,
                                   uint64_t taskId, SNode* pTagCond) {
   int32_t code =
-      getTableList(pHandle->meta, pTableScanNode->scan.tableType, pTableScanNode->scan.uid, pTableListInfo, pTagCond);
+      getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo, pTagCond);
   if (code != TSDB_CODE_SUCCESS) {
     goto _error;
   }
@@ -1957,7 +1944,7 @@ int32_t createMultipleDataReaders(STableScanPhysiNode* pTableScanNode, SReadHand
     taosArrayDestroy(subListInfo->pTableList);
     taosMemoryFree(subListInfo);
   }
-  clearupQueryTableDataCond(&cond);
+  cleanupQueryTableDataCond(&cond);
 
   return 0;
 
@@ -2155,7 +2142,7 @@ int32_t doOpenTableMergeScanOperator(SOperatorInfo* pOperator) {
 
   int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
   pInfo->pSortHandle =
-      tsortCreateSortHandle(pInfo->pSortInfo, pInfo->pColMatchInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize,
+      tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize,
                             numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str);
 
   tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlock, NULL, NULL);
@@ -2233,7 +2220,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
 
 void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) {
   STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param;
-  clearupQueryTableDataCond(&pTableScanInfo->cond);
+  cleanupQueryTableDataCond(&pTableScanInfo->cond);
 
   for (int32_t i = 0; i < taosArrayGetSize(pTableScanInfo->dataReaders); ++i) {
     tsdbReaderT* reader = taosArrayGetP(pTableScanInfo->dataReaders, i);
@@ -2283,7 +2270,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
 
   int32_t numOfCols = 0;
   SArray* pColList =
-      extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, pTaskInfo, COL_MATCH_FROM_COL_ID);
+      extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
 
   int32_t code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
   if (code != TSDB_CODE_SUCCESS) {
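Across the scan operators the same two substitutions recur: `getTableList` now receives the embedded scan node rather than a (tableType, uid) pair, and the data-cond helpers use the corrected `cleanup` spelling. The new call shape:

```c
// One pointer into the plan node replaces two extracted scalars:
int32_t code = getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo, pTagCond);
```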
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index 81899b68cd..9821e87249 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -22,41 +22,52 @@ static int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain
 
 static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);
 
-SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSDataBlock* pResBlock, SArray* pSortInfo,
-                                      SExprInfo* pExprInfo, int32_t numOfCols, SArray* pColMatchColInfo,
-                                      SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortPhyNode, SExecTaskInfo* pTaskInfo) {
   SSortOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SSortOperatorInfo));
   SOperatorInfo*     pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
-  int32_t            rowSize = pResBlock->info.rowSize;
-
-  if (pInfo == NULL || pOperator == NULL || rowSize > 100 * 1024 * 1024) {
+  if (pInfo == NULL || pOperator == NULL /* || rowSize > 100 * 1024 * 1024*/) {
     goto _error;
   }
 
-  pOperator->pExpr = pExprInfo;
-  pOperator->numOfExprs = numOfCols;
+  SDataBlockDescNode* pDescNode = pSortPhyNode->node.pOutputDataBlockDesc;
+
+  int32_t      numOfCols = 0;
+  SSDataBlock* pResBlock = createResDataBlock(pDescNode);
+  SExprInfo*   pExprInfo = createExprInfo(pSortPhyNode->pExprs, NULL, &numOfCols);
+
+  int32_t numOfOutputCols = 0;
+  SArray* pColMatchColInfo =
+      extractColMatchInfo(pSortPhyNode->pTargets, pDescNode, &numOfOutputCols, COL_MATCH_FROM_SLOT_ID);
+
   pInfo->binfo.pCtx = createSqlFunctionCtx(pExprInfo, numOfCols, &pInfo->binfo.rowCellInfoOffset);
   pInfo->binfo.pRes = pResBlock;
 
   initResultSizeInfo(pOperator, 1024);
 
-  pInfo->pSortInfo = pSortInfo;
+  pInfo->pSortInfo = createSortInfo(pSortPhyNode->pSortKeys);
   pInfo->pColMatchInfo = pColMatchColInfo;
   pOperator->name = "SortOperator";
   pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SORT;
   pOperator->blocking = true;
   pOperator->status = OP_NOT_OPENED;
   pOperator->info = pInfo;
+  pOperator->pExpr = pExprInfo;
+  pOperator->numOfExprs = numOfCols;
+  pOperator->pTaskInfo = pTaskInfo;
 
   // lazy evaluation for the following parameter since the input datablock is not known till now.
-  //   pInfo->bufPageSize = rowSize < 1024 ? 1024 * 2 : rowSize * 2;  // there are headers, so pageSize = rowSize +
-  //   header pInfo->sortBufSize = pInfo->bufPageSize * 16;           // TODO dynamic set the available sort buffer
+  //  pInfo->bufPageSize = rowSize < 1024 ? 1024 * 2 : rowSize * 2;
+  //  there are headers, so pageSize = rowSize + header pInfo->sortBufSize = pInfo->bufPageSize * 16;
+  //  TODO dynamic set the available sort buffer
 
-  pOperator->pTaskInfo = pTaskInfo;
   pOperator->fpSet = createOperatorFpSet(doOpenSortOperator, doSort, NULL, NULL, destroyOrderOperatorInfo, NULL, NULL,
                                          getExplainExecInfo);
 
   int32_t code = appendDownstream(pOperator, &downstream, 1);
+  if (code != TSDB_CODE_SUCCESS) {
+    goto _error;
+  }
+
   return pOperator;
 
 _error:
@@ -154,7 +165,7 @@ int32_t doOpenSortOperator(SOperatorInfo* pOperator) {
   pInfo->startTs = taosGetTimestampUs();
 
   // pInfo->binfo.pRes is not equal to the input datablock.
-  pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, pInfo->pColMatchInfo, SORT_SINGLESOURCE_SORT, -1, -1,
+  pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_SINGLESOURCE_SORT, -1, -1,
                                              NULL, pTaskInfo->id.str);
 
   tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, applyScalarFunction, pOperator);
@@ -248,7 +259,7 @@ int32_t doOpenMultiwaySortMergeOperator(SOperatorInfo* pOperator) {
 
   int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
 
-  pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, pInfo->pColMatchInfo, SORT_MULTISOURCE_MERGE,
+  pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE,
                                              pInfo->bufPageSize, numOfBufPage, pInfo->pInputBlock, pTaskInfo->id.str);
 
   tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, NULL, NULL);
@@ -392,10 +403,8 @@ SOperatorInfo* createMultiwaySortMergeOperatorInfo(SOperatorInfo** downStreams,
 
   pInfo->bufPageSize = getProperSortPageSize(rowSize);
 
-  uint32_t numOfSources = taosArrayGetSize(pSortInfo);
-  numOfSources = TMAX(4, numOfSources);
-
-  pInfo->sortBufSize = numOfSources * pInfo->bufPageSize;
+  // one additional is reserved for merged result.
+  pInfo->sortBufSize = pInfo->bufPageSize * (numStreams + 1);
 
   pOperator->fpSet =
       createOperatorFpSet(doOpenMultiwaySortMergeOperator, doMultiwaySortMerge, NULL, NULL,
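The buffer-sizing change in `createMultiwaySortMergeOperatorInfo` replaces the `TMAX(4, numOfSources)` heuristic with an exact budget: one page per input stream plus one page reserved for the merged output. Worked through with illustrative numbers:

```c
// With numStreams = 4 and a 4096-byte page (made-up values):
//   old: sortBufSize = TMAX(4, numOfSources) * bufPageSize
//   new: sortBufSize = bufPageSize * (numStreams + 1) = 4096 * 5 = 20480 bytes
pInfo->sortBufSize = pInfo->bufPageSize * (numStreams + 1);
```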
doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SOptrB SResultRow* pResult = getResultRowByPos(pResultBuf, p1); SqlFunctionCtx* pCtx = pBinfo->pCtx; for (int32_t i = 0; i < numOfOutput; ++i) { - pCtx[i].resultInfo = getResultCell(pResult, i, pBinfo->rowCellInfoOffset); + pCtx[i].resultInfo = getResultEntryInfo(pResult, i, pBinfo->rowCellInfoOffset); struct SResultRowEntryInfo* pResInfo = pCtx[i].resultInfo; if (fmIsWindowPseudoColumnFunc(pCtx[i].functionId)) { continue; @@ -1301,7 +1301,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) { if (pOperator->status == OP_RES_TO_RETURN) { doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); - if (pInfo->binfo.pRes->info.rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) { + if (pInfo->binfo.pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) { pOperator->status = OP_EXEC_DONE; } return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes; @@ -1476,7 +1476,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* } } - initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1); + initResultRowInfo(&pInfo->binfo.resultRowInfo); pOperator->name = "TimeIntervalAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL; @@ -1533,7 +1533,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExpr goto _error; } - initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1); + initResultRowInfo(&pInfo->binfo.resultRowInfo); pOperator->name = "StreamTimeIntervalAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL; @@ -1643,7 +1643,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { if (pOperator->status == OP_RES_TO_RETURN) { doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); - if (pBInfo->pRes->info.rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) { + if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); return NULL; } @@ -1678,7 +1678,7 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) { initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC); blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity); doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); - if (pBInfo->pRes->info.rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) { + if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); } @@ -1714,7 +1714,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { // if (pOperator->status == OP_RES_TO_RETURN) { // // doBuildResultDatablock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes); -// if (pResBlock->info.rows == 0 || !hashRemainDataInGroupInfo(&pSliceInfo->groupResInfo)) { +// if (pResBlock->info.rows == 0 || !hasDataInGroupInfo(&pSliceInfo->groupResInfo)) { // doSetOperatorCompleted(pOperator); // } // @@ -1908,7 +1908,7 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SExprInfo* goto _error; } - initResultRowInfo(&pInfo->binfo.resultRowInfo, 8); + initResultRowInfo(&pInfo->binfo.resultRowInfo); pInfo->pFillColInfo = createFillColInfo(pExprInfo, numOfCols, pValNode); pInfo->binfo.pRes = pResultBlock; @@ -1956,7 +1956,7 @@ SOperatorInfo* 
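The renamed predicate `hasDataInGroupInfo` drives the same drain loop in every operator touched above: build a result block, and finish only when the block is empty or the grouped results are exhausted. A self-contained sketch of that control flow, with illustrative types:

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct { int remaining; } GroupResInfo;  /* stand-in for SGroupResInfo */
typedef struct { int rows; } ResultBlock;

static bool has_data_in_group_info(const GroupResInfo* g) { return g->remaining > 0; }

static void build_result_block(ResultBlock* b, GroupResInfo* g) {
  b->rows = g->remaining > 3 ? 3 : g->remaining;  /* emit up to 3 rows per call */
  g->remaining -= b->rows;
}

int main(void) {
  GroupResInfo g = { .remaining = 7 };
  ResultBlock  b;
  for (;;) {
    build_result_block(&b, &g);
    if (b.rows == 0 || !has_data_in_group_info(&g)) {
      printf("operator completed, final block had %d rows\n", b.rows);
      break;
    }
    printf("returned block with %d rows\n", b.rows);
  }
  return 0;
}
```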
createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf initResultSizeInfo(pOperator, 4096); initAggInfo(&pInfo->binfo, &pInfo->aggSup, pExpr, numOfCols, pResBlock, keyBufSize, pTaskInfo->id.str); - initResultRowInfo(&pInfo->binfo.resultRowInfo, 8); + initResultRowInfo(&pInfo->binfo.resultRowInfo); pInfo->twAggSup = *pTwAggSup; initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); @@ -2006,7 +2006,7 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo } pInfo->twAggSup = *pTwAggSupp; - initResultRowInfo(&pInfo->binfo.resultRowInfo, 8); + initResultRowInfo(&pInfo->binfo.resultRowInfo); initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); pInfo->tsSlotId = tsSlotId; @@ -2153,7 +2153,7 @@ static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo) taosHashClear(pInfo->aggSup.pResultRowHashTable); clearDiskbasedBuf(pInfo->aggSup.pResultBuf); cleanupResultRowInfo(&pInfo->binfo.resultRowInfo); - initResultRowInfo(&pInfo->binfo.resultRowInfo, 1); + initResultRowInfo(&pInfo->binfo.resultRowInfo); } static void clearUpdateDataBlock(SSDataBlock* pBlock) { @@ -2319,7 +2319,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, if (code != TSDB_CODE_SUCCESS) { goto _error; } - initResultRowInfo(&pInfo->binfo.resultRowInfo, (int32_t)1); + initResultRowInfo(&pInfo->binfo.resultRowInfo); pInfo->pChildren = NULL; if (numOfChild > 0) { pInfo->pChildren = taosArrayInit(numOfChild, sizeof(SOperatorInfo)); @@ -2425,12 +2425,15 @@ int32_t initSessionAggSupporter(SStreamAggSupporter* pSup, const char* pKey, Sql return initStreamAggSupporter(pSup, pKey, pCtx, numOfOutput, sizeof(SResultWindowInfo)); } -SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, - SSDataBlock* pResBlock, int64_t gap, int32_t tsSlotId, - STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) { - int32_t code = TSDB_CODE_OUT_OF_MEMORY; +SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo) { + SSessionWinodwPhysiNode* pSessionNode = (SSessionWinodwPhysiNode*)pPhyNode; + int32_t numOfCols = 0; + SExprInfo* pExprInfo = createExprInfo(pSessionNode->window.pFuncs, NULL, &numOfCols); + SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc); + int32_t tsSlotId = ((SColumnNode*)pSessionNode->window.pTspk)->slotId; + int32_t code = TSDB_CODE_OUT_OF_MEMORY; SStreamSessionAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamSessionAggOperatorInfo)); - SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); + SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { goto _error; } @@ -2453,12 +2456,16 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SEx } initDummyFunction(pInfo->pDummyCtx, pInfo->binfo.pCtx, numOfCols); - pInfo->twAggSup = *pTwAggSupp; - initResultRowInfo(&pInfo->binfo.resultRowInfo, 8); + pInfo->twAggSup = (STimeWindowAggSupp) { + .waterMark = pSessionNode->window.watermark, + .calTrigger = pSessionNode->window.triggerType, + .maxTs = INT64_MIN}; + + initResultRowInfo(&pInfo->binfo.resultRowInfo); initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window); pInfo->primaryTsIndex = tsSlotId; - pInfo->gap = gap; + pInfo->gap = pSessionNode->gap; pInfo->binfo.pRes = pResBlock; pInfo->order = TSDB_ORDER_ASC; _hash_fn_t 
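The session-operator hunk above now fills `STimeWindowAggSupp` directly from the plan node with a compound literal instead of copying a caller-supplied struct, using `INT64_MIN` as the "no timestamp seen yet" sentinel. A minimal sketch of that initialization style, with simplified field sets:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct {            /* stand-in for STimeWindowAggSupp */
  int64_t waterMark;
  int32_t calTrigger;
  int64_t maxTs;
} TimeWindowAggSupp;

typedef struct {            /* stand-in for the window plan node */
  int64_t watermark;
  int32_t triggerType;
} WindowPlanNode;

int main(void) {
  WindowPlanNode node = { .watermark = 5000, .triggerType = 1 };
  TimeWindowAggSupp sup = (TimeWindowAggSupp){
      .waterMark  = node.watermark,
      .calTrigger = node.triggerType,
      .maxTs      = INT64_MIN };   /* sentinel: nothing aggregated yet */
  printf("watermark=%lld trigger=%d\n", (long long)sup.waterMark, (int)sup.calTrigger);
  return 0;
}
```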
hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); @@ -2896,7 +2903,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { return pInfo->pDelRes; } doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf); - if (pBInfo->pRes->info.rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) { + if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); } return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes; @@ -2960,25 +2967,20 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { return pBInfo->pRes->info.rows == 0 ? NULL : pBInfo->pRes; } -SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, - int32_t numOfCols, SSDataBlock* pResBlock, int64_t gap, - int32_t tsSlotId, STimeWindowAggSupp* pTwAggSupp, - SExecTaskInfo* pTaskInfo) { - int32_t code = TSDB_CODE_OUT_OF_MEMORY; - SStreamSessionAggOperatorInfo* pInfo = NULL; - SOperatorInfo* pOperator = createStreamSessionAggOperatorInfo(downstream, pExprInfo, numOfCols, pResBlock, gap, - tsSlotId, pTwAggSupp, pTaskInfo); +SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, + SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild) { + int32_t code = TSDB_CODE_OUT_OF_MEMORY; + SOperatorInfo* pOperator = createStreamSessionAggOperatorInfo(downstream, pPhyNode, pTaskInfo); if (pOperator == NULL) { goto _error; } pOperator->name = "StreamFinalSessionWindowAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION; - int32_t numOfChild = 1; // Todo(liuyao) get it from phy plan - pInfo = pOperator->info; + SStreamSessionAggOperatorInfo* pInfo = pOperator->info; pInfo->pChildren = taosArrayInit(8, sizeof(void*)); for (int32_t i = 0; i < numOfChild; i++) { SOperatorInfo* pChild = - createStreamSessionAggOperatorInfo(NULL, pExprInfo, numOfCols, NULL, gap, tsSlotId, pTwAggSupp, pTaskInfo); + createStreamSessionAggOperatorInfo(NULL, pPhyNode, pTaskInfo); if (pChild == NULL) { goto _error; } @@ -2988,7 +2990,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream _error: if (pInfo != NULL) { - destroyStreamSessionAggOperatorInfo(pInfo, numOfCols); + destroyStreamSessionAggOperatorInfo(pInfo, pOperator->numOfExprs); } taosMemoryFreeClear(pInfo); @@ -3269,7 +3271,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) { return pInfo->pDelRes; } doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf); - if (pBInfo->pRes->info.rows == 0 || !hashRemainDataInGroupInfo(&pInfo->groupResInfo)) { + if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) { doSetOperatorCompleted(pOperator); } return pBInfo->pRes->info.rows == 0 ? 
NULL : pBInfo->pRes; @@ -3342,7 +3344,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys pInfo->stateCol = extractColumnFromColumnNode(pColNode); initResultSizeInfo(pOperator, 4096); - initResultRowInfo(&pInfo->binfo.resultRowInfo, 8); + initResultRowInfo(&pInfo->binfo.resultRowInfo); pInfo->twAggSup = (STimeWindowAggSupp){ .waterMark = pStateNode->window.watermark, .calTrigger = pStateNode->window.triggerType, @@ -3590,7 +3592,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI goto _error; } - initResultRowInfo(&iaInfo->binfo.resultRowInfo, (int32_t)1); + initResultRowInfo(&iaInfo->binfo.resultRowInfo); pOperator->name = "TimeMergeIntervalAggOperator"; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL; diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 1502387360..f21cad2dd6 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -71,7 +71,7 @@ SSDataBlock* tsortGetSortedDataBlock(const SSortHandle* pSortHandle) { * @param type * @return */ -SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, SArray* pIndexMap, int32_t type, int32_t pageSize, int32_t numOfPages, SSDataBlock* pBlock, const char* idstr) { +SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, int32_t type, int32_t pageSize, int32_t numOfPages, SSDataBlock* pBlock, const char* idstr) { SSortHandle* pSortHandle = taosMemoryCalloc(1, sizeof(SSortHandle)); pSortHandle->type = type; @@ -195,6 +195,11 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) { return doAddNewExternalMemSource(pHandle->pBuf, pHandle->pOrderedSource, pBlock, &pHandle->sourceId); } +static void setCurrentSourceIsDone(SSortSource* pSource, SSortHandle* pHandle) { + pSource->src.rowIndex = -1; + ++pHandle->numOfCompletedSources; +} + static int32_t sortComparInit(SMsortComparParam* cmpParam, SArray* pSources, int32_t startIndex, int32_t endIndex, SSortHandle* pHandle) { cmpParam->pSources = taosArrayGet(pSources, startIndex); cmpParam->numOfSources = (endIndex - startIndex + 1); @@ -205,8 +210,10 @@ static int32_t sortComparInit(SMsortComparParam* cmpParam, SArray* pSources, int for (int32_t i = 0; i < cmpParam->numOfSources; ++i) { SSortSource* pSource = cmpParam->pSources[i]; + // set current source is done if (taosArrayGetSize(pSource->pageIdList) == 0) { - return TSDB_CODE_SUCCESS; + setCurrentSourceIsDone(pSource, pHandle); + continue; } SPageInfo* pPgInfo = *(SPageInfo**)taosArrayGet(pSource->pageIdList, pSource->pageIndex); @@ -233,10 +240,9 @@ static int32_t sortComparInit(SMsortComparParam* cmpParam, SArray* pSources, int SSortSource* pSource = cmpParam->pSources[i]; pSource->src.pBlock = pHandle->fetchfp(pSource->param); - // set current source id done + // set current source is done if (pSource->src.pBlock == NULL) { - pSource->src.rowIndex = -1; - ++pHandle->numOfCompletedSources; + setCurrentSourceIsDone(pSource, pHandle); } } } diff --git a/source/libs/executor/test/sortTests.cpp b/source/libs/executor/test/sortTests.cpp index c037fae75f..66ed078bbe 100644 --- a/source/libs/executor/test/sortTests.cpp +++ b/source/libs/executor/test/sortTests.cpp @@ -209,7 +209,7 @@ TEST(testCase, inMem_sort_Test) { SArray* orderInfo = taosArrayInit(1, sizeof(SBlockOrderInfo)); taosArrayPush(orderInfo, &oi); - SSortHandle* phandle = tsortCreateSortHandle(orderInfo, NULL, SORT_SINGLESOURCE_SORT, 1024, 5, NULL, "test_abc"); + SSortHandle* phandle = tsortCreateSortHandle(orderInfo, 
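The `tsort.c` change above fixes a real control-flow bug: a source with an empty page list used to trigger an early `return TSDB_CODE_SUCCESS`, leaving every later source uninitialized. The new `setCurrentSourceIsDone` marks that source exhausted and lets the loop continue. A minimal sketch under illustrative types:

```c
#include <stdio.h>

typedef struct { int numPages; int rowIndex; } Source;  /* stand-in for SSortSource */

static void set_source_done(Source* s, int* completed) {
  s->rowIndex = -1;     /* sentinel: nothing left to read from this source */
  ++*completed;
}

int main(void) {
  Source sources[3] = { { 2, 0 }, { 0, 0 }, { 5, 0 } };
  int completed = 0;
  for (int i = 0; i < 3; ++i) {
    if (sources[i].numPages == 0) {
      set_source_done(&sources[i], &completed);  /* was: early return, skipping sources[2] */
      continue;
    }
    /* ...load the first page of sources[i]... */
  }
  printf("%d of 3 sources were empty and marked done\n", completed);
  return 0;
}
```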
SORT_SINGLESOURCE_SORT, 1024, 5, NULL, "test_abc"); tsortSetFetchRawDataFp(phandle, getSingleColDummyBlock, NULL, NULL); _info* pInfo = (_info*) taosMemoryCalloc(1, sizeof(_info)); @@ -298,7 +298,7 @@ TEST(testCase, external_mem_sort_Test) { SArray* orderInfo = taosArrayInit(1, sizeof(SBlockOrderInfo)); taosArrayPush(orderInfo, &oi); - SSortHandle* phandle = tsortCreateSortHandle(orderInfo, NULL, SORT_SINGLESOURCE_SORT, 128, 3, NULL, "test_abc"); + SSortHandle* phandle = tsortCreateSortHandle(orderInfo, SORT_SINGLESOURCE_SORT, 128, 3, NULL, "test_abc"); tsortSetFetchRawDataFp(phandle, getSingleColDummyBlock, NULL, NULL); SSortSource* ps = static_cast(taosMemoryCalloc(1, sizeof(SSortSource))); @@ -365,7 +365,7 @@ TEST(testCase, ordered_merge_sort_Test) { taosArrayPush(pBlock->pDataBlock, &colInfo); } - SSortHandle* phandle = tsortCreateSortHandle(orderInfo, NULL, SORT_MULTISOURCE_MERGE, 1024, 5, pBlock,"test_abc"); + SSortHandle* phandle = tsortCreateSortHandle(orderInfo, SORT_MULTISOURCE_MERGE, 1024, 5, pBlock,"test_abc"); tsortSetFetchRawDataFp(phandle, getSingleColDummyBlock, NULL, NULL); tsortSetComparFp(phandle, docomp); diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 2e30b01357..d0eb1548c1 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -427,7 +427,7 @@ static SNode* logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* pD COPY_SCALAR_FIELD(triggerType); COPY_SCALAR_FIELD(watermark); COPY_SCALAR_FIELD(filesFactor); - COPY_SCALAR_FIELD(intervalAlgo); + COPY_SCALAR_FIELD(windowAlgo); return (SNode*)pDst; } @@ -538,6 +538,12 @@ static SNode* physiIntervalCopy(const SIntervalPhysiNode* pSrc, SIntervalPhysiNo return (SNode*)pDst; } +static SNode* physiSessionCopy(const SSessionWinodwPhysiNode* pSrc, SSessionWinodwPhysiNode* pDst) { + COPY_BASE_OBJECT_FIELD(window, physiWindowCopy); + COPY_SCALAR_FIELD(gap); + return (SNode*)pDst; +} + static SNode* dataBlockDescCopy(const SDataBlockDescNode* pSrc, SDataBlockDescNode* pDst) { COPY_SCALAR_FIELD(dataBlockId); CLONE_NODE_LIST_FIELD(pSlots); @@ -678,6 +684,9 @@ SNode* nodesCloneNode(const SNode* pNode) { case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL: case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL: return physiIntervalCopy((const SIntervalPhysiNode*)pNode, (SIntervalPhysiNode*)pDst); + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION: + return physiSessionCopy((const SSessionWinodwPhysiNode*)pNode, (SSessionWinodwPhysiNode*)pDst); default: break; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index fb6a428d3c..1b2eadd8e8 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -172,8 +172,6 @@ const char* nodesNodeName(ENodeType type) { return "ShowSubscribesStmt"; case QUERY_NODE_SHOW_SMAS_STMT: return "ShowSmasStmt"; - case QUERY_NODE_SHOW_CONFIGS_STMT: - return "ShowConfigsStmt"; case QUERY_NODE_SHOW_QUERIES_STMT: return "ShowQueriesStmt"; case QUERY_NODE_SHOW_VNODES_STMT: @@ -246,6 +244,10 @@ const char* nodesNodeName(ENodeType type) { return "PhysiSessionWindow"; case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: return "PhysiStreamSessionWindow"; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION: + return "PhysiStreamSemiSessionWindow"; + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION: + return "PhysiStreamFinalSessionWindow"; case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: 
return "PhysiStateWindow"; case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: @@ -3998,6 +4000,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { return physiFillNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION: return physiSessionWindowNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: @@ -4131,6 +4135,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { return jsonToPhysiFillNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION: return jsonToPhysiSessionWindowNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index a8de997643..d8b386df45 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -200,7 +200,6 @@ SNode* nodesMakeNode(ENodeType type) { case QUERY_NODE_SHOW_CONSUMERS_STMT: case QUERY_NODE_SHOW_SUBSCRIBES_STMT: case QUERY_NODE_SHOW_SMAS_STMT: - case QUERY_NODE_SHOW_CONFIGS_STMT: case QUERY_NODE_SHOW_CONNECTIONS_STMT: case QUERY_NODE_SHOW_QUERIES_STMT: case QUERY_NODE_SHOW_VNODES_STMT: @@ -289,6 +288,10 @@ SNode* nodesMakeNode(ENodeType type) { return makeNode(type, sizeof(SSessionWinodwPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: return makeNode(type, sizeof(SStreamSessionWinodwPhysiNode)); + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION: + return makeNode(type, sizeof(SStreamSemiSessionWinodwPhysiNode)); + case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION: + return makeNode(type, sizeof(SStreamFinalSessionWinodwPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE: return makeNode(type, sizeof(SStateWinodwPhysiNode)); case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: @@ -616,7 +619,6 @@ void nodesDestroyNode(SNode* pNode) { case QUERY_NODE_SHOW_CONSUMERS_STMT: case QUERY_NODE_SHOW_SUBSCRIBES_STMT: case QUERY_NODE_SHOW_SMAS_STMT: - case QUERY_NODE_SHOW_CONFIGS_STMT: case QUERY_NODE_SHOW_CONNECTIONS_STMT: case QUERY_NODE_SHOW_QUERIES_STMT: case QUERY_NODE_SHOW_VNODES_STMT: @@ -807,6 +809,7 @@ void nodesDestroyNode(SNode* pNode) { } case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION: case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION: + case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION: case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION: destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode); break; diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index 7f301aa81f..d1e57a39b0 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -1460,7 +1460,7 @@ SNode* createKillQueryStmt(SAstCreateContext* pCxt, const SToken* pQueryId) { CHECK_PARSER_STATUS(pCxt); SKillQueryStmt* pStmt = (SKillQueryStmt*)nodesMakeNode(QUERY_NODE_KILL_QUERY_STMT); CHECK_OUT_OF_MEM(pStmt); - strncpy(pStmt->queryId, pQueryId->z, TMIN(pQueryId->n, sizeof(pStmt->queryId) - 1)); + trimString(pQueryId->z, pQueryId->n, pStmt->queryId, sizeof(pStmt->queryId) - 1); return (SNode*)pStmt; } diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index 1aa1320a3c..1e968fe1fa 100644 --- 
a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -387,6 +387,21 @@ static int32_t collectMetaKeyFromShowQueries(SCollectMetaKeyCxt* pCxt, SShowStmt pCxt->pMetaCache); } +static int32_t collectMetaKeyFromShowConfigs(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CONFIGS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowVariables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CONFIGS, + pCxt->pMetaCache); +} + +static int32_t collectMetaKeyFromShowApps(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_APPS, + pCxt->pMetaCache); +} + static int32_t collectMetaKeyFromShowTransactions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TRANS, pCxt->pMetaCache); @@ -461,6 +476,10 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromShowConnections(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_QUERIES_STMT: return collectMetaKeyFromShowQueries(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_VARIABLE_STMT: + return collectMetaKeyFromShowVariables(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_APPS_STMT: + return collectMetaKeyFromShowApps(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_TRANSACTIONS_STMT: return collectMetaKeyFromShowTransactions(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_DELETE_STMT: diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index d840d10db9..235b24d993 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3892,7 +3892,7 @@ static int32_t translateDescribe(STranslateContext* pCxt, SDescribeStmt* pStmt) static int32_t translateKillConnection(STranslateContext* pCxt, SKillStmt* pStmt) { SKillConnReq killReq = {0}; killReq.connId = pStmt->targetId; - return buildCmdMsg(pCxt, TDMT_MND_KILL_CONN, (FSerializeFunc)tSerializeSKillQueryReq, &killReq); + return buildCmdMsg(pCxt, TDMT_MND_KILL_CONN, (FSerializeFunc)tSerializeSKillConnReq, &killReq); } static int32_t translateKillQuery(STranslateContext* pCxt, SKillQueryStmt* pStmt) { diff --git a/source/libs/parser/test/mockCatalogService.cpp b/source/libs/parser/test/mockCatalogService.cpp index 68f9e9d36d..9758da7899 100644 --- a/source/libs/parser/test/mockCatalogService.cpp +++ b/source/libs/parser/test/mockCatalogService.cpp @@ -285,7 +285,7 @@ class MockCatalogServiceImpl { } void createSmaIndex(const SMCreateSmaReq* pReq) { - STableIndexInfo info; + STableIndexInfo info = {0}; info.intervalUnit = pReq->intervalUnit; info.slidingUnit = pReq->slidingUnit; info.interval = pReq->interval; diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index cbc74a2711..3cd5eeb655 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -548,6 +548,7 @@ static int32_t createWindowLogicNodeBySession(SLogicPlanContext* pCxt, SSessionW pWindow->winType = WINDOW_TYPE_SESSION; pWindow->sessionGap = ((SValueNode*)pSession->pGap)->datum.i; + pWindow->windowAlgo = pCxt->pPlanCxt->streamQuery ? 
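The one-character mock-catalog fix above (`STableIndexInfo info = {0};`) matters because the function assigns only some fields; without aggregate zero-initialization the rest hold indeterminate stack garbage. A minimal sketch of the pitfall, with an illustrative struct:

```c
#include <stdio.h>

typedef struct {       /* stand-in for STableIndexInfo */
  int   intervalUnit;
  long  interval;
  char* expr;          /* deliberately left unassigned below */
} TableIndexInfo;

int main(void) {
  TableIndexInfo info = {0};  /* every member starts at zero/NULL */
  info.intervalUnit = 1;
  info.interval = 60000;
  printf("expr is %s\n", info.expr == NULL ? "NULL (well-defined)" : info.expr);
  return 0;
}
```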
SESSION_ALGO_STREAM_SINGLE : SESSION_ALGO_MERGE; pWindow->pTspk = nodesCloneNode((SNode*)pSession->pCol); if (NULL == pWindow->pTspk) { @@ -572,7 +573,7 @@ static int32_t createWindowLogicNodeByInterval(SLogicPlanContext* pCxt, SInterva pWindow->sliding = (NULL != pInterval->pSliding ? ((SValueNode*)pInterval->pSliding)->datum.i : pWindow->interval); pWindow->slidingUnit = (NULL != pInterval->pSliding ? ((SValueNode*)pInterval->pSliding)->unit : pWindow->intervalUnit); - pWindow->intervalAlgo = pCxt->pPlanCxt->streamQuery ? INTERVAL_ALGO_STREAM_SINGLE : INTERVAL_ALGO_HASH; + pWindow->windowAlgo = pCxt->pPlanCxt->streamQuery ? INTERVAL_ALGO_STREAM_SINGLE : INTERVAL_ALGO_HASH; pWindow->pTspk = nodesCloneNode(pInterval->pCol); if (NULL == pWindow->pTspk) { diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 9d7cd0cf27..a7c25162b7 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -731,6 +731,7 @@ static int32_t opkDoOptimized(SOptimizeContext* pCxt, SSortLogicNode* pSort, SNo FOREACH(pNode, pSort->node.pParent->pChildren) { if (nodesEqualNode(pNode, (SNode*)pSort)) { REPLACE_NODE(pDownNode); + ((SLogicNode*)pDownNode)->pParent = pSort->node.pParent; break; } } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 2974b3ef8c..99476c4cbd 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -977,8 +977,8 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* return code; } -static ENodeType getIntervalOperatorType(EIntervalAlgorithm intervalAlgo) { - switch (intervalAlgo) { +static ENodeType getIntervalOperatorType(EWindowAlgorithm windowAlgo) { + switch (windowAlgo) { case INTERVAL_ALGO_HASH: return QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL; case INTERVAL_ALGO_MERGE: @@ -989,6 +989,14 @@ static ENodeType getIntervalOperatorType(EIntervalAlgorithm intervalAlgo) { return QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL; case INTERVAL_ALGO_STREAM_SINGLE: return QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL; + case SESSION_ALGO_STREAM_FINAL: + return QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION; + case SESSION_ALGO_STREAM_SEMI: + return QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION; + case SESSION_ALGO_STREAM_SINGLE: + return QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION; + case SESSION_ALGO_MERGE: + return QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION; default: break; } @@ -998,7 +1006,7 @@ static ENodeType getIntervalOperatorType(EIntervalAlgorithm intervalAlgo) { static int32_t createIntervalPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) { SIntervalPhysiNode* pInterval = (SIntervalPhysiNode*)makePhysiNode( - pCxt, (SLogicNode*)pWindowLogicNode, getIntervalOperatorType(pWindowLogicNode->intervalAlgo)); + pCxt, (SLogicNode*)pWindowLogicNode, getIntervalOperatorType(pWindowLogicNode->windowAlgo)); if (NULL == pInterval) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -1015,8 +1023,7 @@ static int32_t createIntervalPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChil static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) { SSessionWinodwPhysiNode* pSession = (SSessionWinodwPhysiNode*)makePhysiNode( - pCxt, (SLogicNode*)pWindowLogicNode, - (pCxt->pPlanCxt->streamQuery ? 
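With `intervalAlgo` generalized to `windowAlgo` above, one switch now maps both interval and session algorithms to physical operator types. A sketch of that single-dispatch-table idea; the enumerators and strings below are illustrative, not the engine's:

```c
#include <stdio.h>

typedef enum {                       /* stand-in for EWindowAlgorithm */
  ALGO_INTERVAL_HASH,
  ALGO_SESSION_MERGE,
  ALGO_SESSION_STREAM_SINGLE
} WindowAlgo;

static const char* operator_for(WindowAlgo algo) {
  switch (algo) {
    case ALGO_INTERVAL_HASH:         return "HASH_INTERVAL";
    case ALGO_SESSION_MERGE:         return "MERGE_SESSION";
    case ALGO_SESSION_STREAM_SINGLE: return "STREAM_SESSION";
    default:                         return "INVALID";
  }
}

int main(void) {
  int streamQuery = 1;
  WindowAlgo algo = streamQuery ? ALGO_SESSION_STREAM_SINGLE : ALGO_SESSION_MERGE;
  printf("session window -> %s operator\n", operator_for(algo));
  return 0;
}
```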
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION : QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION)); + pCxt, (SLogicNode*)pWindowLogicNode, getIntervalOperatorType(pWindowLogicNode->windowAlgo)); if (NULL == pSession) { return TSDB_CODE_OUT_OF_MEMORY; } diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 3a9e1d7405..fe02dd6425 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -376,8 +376,8 @@ static int32_t stbSplSplitIntervalForBatch(SSplitContext* pCxt, SStableSplitInfo SLogicNode* pPartWindow = NULL; int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow); if (TSDB_CODE_SUCCESS == code) { - ((SWindowLogicNode*)pPartWindow)->intervalAlgo = INTERVAL_ALGO_HASH; - ((SWindowLogicNode*)pInfo->pSplitNode)->intervalAlgo = INTERVAL_ALGO_MERGE; + ((SWindowLogicNode*)pPartWindow)->windowAlgo = INTERVAL_ALGO_HASH; + ((SWindowLogicNode*)pInfo->pSplitNode)->windowAlgo = INTERVAL_ALGO_MERGE; SNodeList* pMergeKeys = NULL; code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pInfo->pSplitNode)->pTspk, &pMergeKeys); if (TSDB_CODE_SUCCESS == code) { @@ -400,8 +400,8 @@ static int32_t stbSplSplitIntervalForStream(SSplitContext* pCxt, SStableSplitInf SLogicNode* pPartWindow = NULL; int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow); if (TSDB_CODE_SUCCESS == code) { - ((SWindowLogicNode*)pPartWindow)->intervalAlgo = INTERVAL_ALGO_STREAM_SEMI; - ((SWindowLogicNode*)pInfo->pSplitNode)->intervalAlgo = INTERVAL_ALGO_STREAM_FINAL; + ((SWindowLogicNode*)pPartWindow)->windowAlgo = INTERVAL_ALGO_STREAM_SEMI; + ((SWindowLogicNode*)pInfo->pSplitNode)->windowAlgo = INTERVAL_ALGO_STREAM_FINAL; code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartWindow); } if (TSDB_CODE_SUCCESS == code) { @@ -421,8 +421,29 @@ static int32_t stbSplSplitInterval(SSplitContext* pCxt, SStableSplitInfo* pInfo) } } +static int32_t stbSplSplitSessionForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) { + SLogicNode* pPartWindow = NULL; + int32_t code = stbSplCreatePartWindowNode((SWindowLogicNode*)pInfo->pSplitNode, &pPartWindow); + if (TSDB_CODE_SUCCESS == code) { + ((SWindowLogicNode*)pPartWindow)->windowAlgo = SESSION_ALGO_STREAM_SEMI; + ((SWindowLogicNode*)pInfo->pSplitNode)->windowAlgo = SESSION_ALGO_STREAM_FINAL; + code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartWindow); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren, + (SNode*)splCreateScanSubplan(pCxt, pPartWindow, SPLIT_FLAG_STABLE_SPLIT)); + } + pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE; + ++(pCxt->groupId); + return code; +} + static int32_t stbSplSplitSession(SSplitContext* pCxt, SStableSplitInfo* pInfo) { - return TSDB_CODE_PLAN_INTERNAL_ERROR; + if (pCxt->pPlanCxt->streamQuery) { + return stbSplSplitSessionForStream(pCxt, pInfo); + } else { + return TSDB_CODE_PLAN_INTERNAL_ERROR; + } } static int32_t stbSplSplitWindowNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) { @@ -537,10 +558,12 @@ static int32_t stbSplCreateMergeKeys(SNodeList* pSortKeys, SNodeList* pTargets, SNode* pNode = NULL; FOREACH(pNode, pSortKeys) { SOrderByExprNode* pSortKey = (SOrderByExprNode*)pNode; + SExprNode* pSortExpr = (SExprNode*)pSortKey->pExpr; SNode* pTarget = NULL; bool found = false; FOREACH(pTarget, pTargets) { - if (0 == strcmp(((SExprNode*)pSortKey->pExpr)->aliasName, ((SColumnNode*)pTarget)->colName)) { + if ((QUERY_NODE_COLUMN == 
nodeType(pSortExpr) && nodesEqualNode((SNode*)pSortExpr, pTarget)) || + (0 == strcmp(pSortExpr->aliasName, ((SColumnNode*)pTarget)->colName))) { code = nodesListMakeStrictAppend(&pMergeKeys, stbSplCreateOrderByExpr(pSortKey, pTarget)); if (TSDB_CODE_SUCCESS != code) { break; @@ -549,7 +572,7 @@ static int32_t stbSplCreateMergeKeys(SNodeList* pSortKeys, SNodeList* pTargets, } } if (TSDB_CODE_SUCCESS == code && !found) { - SNode* pCol = stbSplCreateColumnNode((SExprNode*)pSortKey->pExpr); + SNode* pCol = stbSplCreateColumnNode(pSortExpr); code = nodesListMakeStrictAppend(&pMergeKeys, stbSplCreateOrderByExpr(pSortKey, pCol)); if (TSDB_CODE_SUCCESS == code) { code = nodesListStrictAppend(pTargets, pCol); diff --git a/source/libs/planner/test/planOrderByTest.cpp b/source/libs/planner/test/planOrderByTest.cpp index b065ad0a59..2ca662cf86 100644 --- a/source/libs/planner/test/planOrderByTest.cpp +++ b/source/libs/planner/test/planOrderByTest.cpp @@ -28,6 +28,8 @@ TEST_F(PlanOrderByTest, basic) { // ORDER BY key is not in the projection list run("SELECT c1 FROM t1 ORDER BY c2"); + run("SELECT c1 AS a FROM t1 ORDER BY a"); + run("SELECT c1 + 10 AS a FROM t1 ORDER BY a"); } @@ -59,4 +61,6 @@ TEST_F(PlanOrderByTest, stable) { run("SELECT c2 FROM st1 ORDER BY c1"); run("SELECT c2 FROM st1 PARTITION BY c2 ORDER BY c1"); + + run("SELECT c1 AS a FROM st1 ORDER BY a"); } diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index a5a499aaf5..9322d7e594 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -177,10 +177,6 @@ char* jobTaskStatusStr(int32_t status) { return "SUCCEED"; case JOB_TASK_STATUS_FAILED: return "FAILED"; - case JOB_TASK_STATUS_CANCELLING: - return "CANCELLING"; - case JOB_TASK_STATUS_CANCELLED: - return "CANCELLED"; case JOB_TASK_STATUS_DROPPING: return "DROPPING"; default: diff --git a/source/libs/qworker/src/qwDbg.c b/source/libs/qworker/src/qwDbg.c index 368c3bb517..add9700a3a 100644 --- a/source/libs/qworker/src/qwDbg.c +++ b/source/libs/qworker/src/qwDbg.c @@ -44,40 +44,30 @@ int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus, break; case JOB_TASK_STATUS_EXECUTING: if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_SUCCEED && - newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_CANCELLING && - newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) { + newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_DROPPING) { QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } break; case JOB_TASK_STATUS_PARTIAL_SUCCEED: if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_SUCCEED && - newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_FAILED && - newStatus != JOB_TASK_STATUS_DROPPING) { + newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_DROPPING) { QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } break; case JOB_TASK_STATUS_SUCCEED: - if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING && - newStatus != JOB_TASK_STATUS_FAILED) { + if (newStatus != JOB_TASK_STATUS_DROPPING && newStatus != JOB_TASK_STATUS_FAILED) { QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } break; case JOB_TASK_STATUS_FAILED: - if (newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) { + if (newStatus != JOB_TASK_STATUS_DROPPING) { QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } break; - case JOB_TASK_STATUS_CANCELLING: - if (newStatus != 
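The `qwDbg.c` hunks above prune CANCELLING/CANCELLED out of the task state machine, leaving a smaller transition table (for example, SUCCEED may now move only to DROPPING or FAILED). A minimal sketch of validating transitions against such a table, with only two rows spelled out:

```c
#include <stdio.h>

typedef enum { ST_EXECUTING, ST_PARTIAL, ST_SUCCEED, ST_FAILED, ST_DROPPING } St;

/* Returns nonzero when the transition is allowed; other rows elided. */
static int valid_transition(St from, St to) {
  switch (from) {
    case ST_SUCCEED: return to == ST_DROPPING || to == ST_FAILED;
    case ST_FAILED:  return to == ST_DROPPING;
    default:         return 1;
  }
}

int main(void) {
  printf("SUCCEED->DROPPING: %s\n", valid_transition(ST_SUCCEED, ST_DROPPING) ? "ok" : "rejected");
  printf("FAILED->SUCCEED:   %s\n", valid_transition(ST_FAILED,  ST_SUCCEED)  ? "ok" : "rejected");
  return 0;
}
```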
JOB_TASK_STATUS_CANCELLED) { - QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); - } - - break; - case JOB_TASK_STATUS_CANCELLED: case JOB_TASK_STATUS_DROPPING: if (newStatus != JOB_TASK_STATUS_FAILED && newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) { QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index 57ebb89ed2..800cc4c6e5 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -614,6 +614,8 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { QW_SET_EVENT_PROCESSED(ctx, QW_EVENT_FETCH); qwBuildAndSendFetchRsp(&qwMsg->connInfo, rsp, dataLen, code); + rsp = NULL; + QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code, tstrerror(code), dataLen); } else { @@ -633,7 +635,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) { rsp = NULL; qwMsg->connInfo = ctx->dataConnInfo; - qwBuildAndSendFetchRsp(&qwMsg->connInfo, rsp, 0, code); + qwBuildAndSendFetchRsp(&qwMsg->connInfo, NULL, 0, code); QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code, tstrerror(code), 0); } diff --git a/source/libs/scheduler/inc/schedulerInt.h b/source/libs/scheduler/inc/schedulerInt.h index 8875ebdf73..545bb6c45a 100644 --- a/source/libs/scheduler/inc/schedulerInt.h +++ b/source/libs/scheduler/inc/schedulerInt.h @@ -48,6 +48,12 @@ enum { SCH_FETCH_CB, }; +typedef enum { + SCH_OP_NULL = 0, + SCH_OP_EXEC, + SCH_OP_FETCH, +} SCH_OP_TYPE; + typedef struct SSchTrans { void *pTrans; void *pHandle; @@ -188,11 +194,15 @@ typedef struct SSchTask { typedef struct SSchJobAttr { EExplainMode explainMode; - bool syncSchedule; bool queryJob; bool needFlowCtrl; } SSchJobAttr; +typedef struct { + int32_t op; + bool sync; +} SSchOpStatus; + typedef struct SSchJob { int64_t refId; uint64_t queryId; @@ -217,8 +227,8 @@ typedef struct SSchJob { int8_t status; SQueryNodeAddr resNode; tsem_t rspSem; - int8_t userFetch; - int32_t remoteFetch; + SSchOpStatus opStatus; + bool *reqKilled; SSchTask *fetchTask; int32_t errCode; SRWLatch resLock; @@ -227,7 +237,6 @@ typedef struct SSchJob { int32_t resNumOfRows; SSchResInfo userRes; const char *sql; - int32_t userCb; SQueryProfileSummary summary; } SSchJob; @@ -285,6 +294,10 @@ extern SSchedulerMgmt schMgmt; #define SCH_GET_JOB_STATUS(job) atomic_load_8(&(job)->status) #define SCH_GET_JOB_STATUS_STR(job) jobTaskStatusStr(SCH_GET_JOB_STATUS(job)) +#define SCH_JOB_IN_SYNC_OP(job) ((job)->opStatus.op && (job)->opStatus.sync) +#define SCH_JOB_IN_ASYNC_EXEC_OP(job) (((job)->opStatus.op == SCH_OP_EXEC) && (!(job)->opStatus.sync)) +#define SCH_JOB_IN_ASYNC_FETCH_OP(job) (((job)->opStatus.op == SCH_OP_FETCH) && (!(job)->opStatus.sync)) + #define SCH_SET_JOB_NEED_FLOW_CTRL(_job) (_job)->attr.needFlowCtrl = true #define SCH_JOB_NEED_FLOW_CTRL(_job) ((_job)->attr.needFlowCtrl) #define SCH_TASK_NEED_FLOW_CTRL(_job, _task) (SCH_IS_DATA_SRC_QRY_TASK(_task) && SCH_JOB_NEED_FLOW_CTRL(_job) && SCH_IS_LEVEL_UNFINISHED((_task)->level)) @@ -356,7 +369,7 @@ int32_t schMakeBrokenLinkVal(SSchJob *pJob, SSchTask *pTask, SRpcBrokenlinkVal * int32_t schAppendTaskExecNode(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr, int32_t execIdx); int32_t schExecStaticExplainJob(SSchedulerReq *pReq, int64_t *job, bool sync); int32_t schExecJobImpl(SSchedulerReq *pReq, int64_t *job, SQueryResult* pRes, bool sync); -int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus); +int32_t schUpdateJobStatus(SSchJob *pJob, int8_t 
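The `qwProcessCQuery` fix above (`rsp = NULL;` right after the send) is the classic "drop your reference once ownership transfers" pattern: later cleanup code can no longer free or reuse a buffer that the response path now owns. A minimal sketch:

```c
#include <stdio.h>
#include <stdlib.h>

/* Pretend the transport layer takes ownership and eventually frees buf. */
static void send_response(void* buf) {
  free(buf);
}

int main(void) {
  void* rsp = malloc(128);
  send_response(rsp);
  rsp = NULL;            /* ownership transferred: drop the local reference */

  if (rsp != NULL) {     /* shared cleanup path further down the function */
    free(rsp);           /* would be a double free without the NULL above */
  }
  printf("cleanup skipped safely\n");
  return 0;
}
```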
newStatus); int32_t schCancelJob(SSchJob *pJob); int32_t schProcessOnJobDropped(SSchJob *pJob, int32_t errCode); uint64_t schGenTaskId(void); @@ -368,6 +381,8 @@ int32_t schAsyncFetchRows(SSchJob *pJob); int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, bool dropExecNode, void *handle, int32_t execIdx); int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId* pEpId, SArray* pStatusList); void schFreeSMsgSendInfo(SMsgSendInfo *msgSendInfo); +char* schGetOpStr(SCH_OP_TYPE type); +int32_t schBeginOperation(SSchJob *pJob, SCH_OP_TYPE type, bool sync); #ifdef __cplusplus diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c index 31536f413d..733f8694cc 100644 --- a/source/libs/scheduler/src/schJob.c +++ b/source/libs/scheduler/src/schJob.c @@ -21,9 +21,9 @@ #include "tref.h" #include "trpc.h" -FORCE_INLINE SSchJob *schAcquireJob(int64_t refId) { qDebug("acquire jobId:0x%"PRIx64, refId); return (SSchJob *)taosAcquireRef(schMgmt.jobRef, refId); } +FORCE_INLINE SSchJob *schAcquireJob(int64_t refId) { qDebug("sch acquire jobId:0x%"PRIx64, refId); return (SSchJob *)taosAcquireRef(schMgmt.jobRef, refId); } -FORCE_INLINE int32_t schReleaseJob(int64_t refId) { qDebug("release jobId:0x%"PRIx64, refId); return taosReleaseRef(schMgmt.jobRef, refId); } +FORCE_INLINE int32_t schReleaseJob(int64_t refId) { qDebug("sch release jobId:0x%"PRIx64, refId); return taosReleaseRef(schMgmt.jobRef, refId); } int32_t schInitTask(SSchJob *pJob, SSchTask *pTask, SSubplan *pPlan, SSchLevel *pLevel) { pTask->plan = pPlan; @@ -47,14 +47,14 @@ int32_t schInitJob(SSchedulerReq *pReq, SSchJob **pSchJob, SQueryResult* pRes, b int64_t refId = -1; SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob)); if (NULL == pJob) { - qError("QID:%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob)); + qError("QID:0x%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob)); SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); } pJob->attr.explainMode = pReq->pDag->explainInfo.mode; - pJob->attr.syncSchedule = syncSchedule; pJob->conn = *pReq->pConn; pJob->sql = pReq->sql; + pJob->reqKilled = pReq->reqKilled; pJob->userRes.queryRes = pRes; pJob->userRes.execFp = pReq->fp; pJob->userRes.userParam = pReq->cbParam; @@ -108,7 +108,7 @@ int32_t schInitJob(SSchedulerReq *pReq, SSchJob **pSchJob, SQueryResult* pRes, b atomic_add_fetch_32(&schMgmt.jobNum, 1); if (NULL == schAcquireJob(refId)) { - SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId); + SCH_JOB_ELOG("schAcquireJob job failed, refId:0x%" PRIx64, refId); SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); } @@ -116,7 +116,7 @@ int32_t schInitJob(SSchedulerReq *pReq, SSchJob **pSchJob, SQueryResult* pRes, b SCH_JOB_DLOG("job refId:0x%" PRIx64" created", pJob->refId); - pJob->status = JOB_TASK_STATUS_NOT_START; + schUpdateJobStatus(pJob, JOB_TASK_STATUS_NOT_START); *pSchJob = pJob; @@ -155,18 +155,57 @@ void schFreeTask(SSchJob *pJob, SSchTask *pTask) { } } + +void schUpdateJobErrCode(SSchJob *pJob, int32_t errCode) { + if (TSDB_CODE_SUCCESS == errCode) { + return; + } + + int32_t origCode = atomic_load_32(&pJob->errCode); + if (TSDB_CODE_SUCCESS == origCode) { + if (origCode == atomic_val_compare_exchange_32(&pJob->errCode, origCode, errCode)) { + goto _return; + } + + origCode = atomic_load_32(&pJob->errCode); + } + + if (NEED_CLIENT_HANDLE_ERROR(origCode)) { + return; + } + + if (NEED_CLIENT_HANDLE_ERROR(errCode)) { + atomic_store_32(&pJob->errCode, errCode); + goto _return; + } + + return; + +_return: + + 
SCH_JOB_DLOG("job errCode updated to %x - %s", errCode, tstrerror(errCode)); +} + + + FORCE_INLINE bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus) { int8_t status = SCH_GET_JOB_STATUS(pJob); if (pStatus) { *pStatus = status; } - return (status == JOB_TASK_STATUS_FAILED || status == JOB_TASK_STATUS_CANCELLED || - status == JOB_TASK_STATUS_CANCELLING || status == JOB_TASK_STATUS_DROPPING || + if (*pJob->reqKilled) { + schUpdateJobStatus(pJob, JOB_TASK_STATUS_DROPPING); + schUpdateJobErrCode(pJob, TSDB_CODE_TSC_QUERY_KILLED); + + return true; + } + + return (status == JOB_TASK_STATUS_FAILED || status == JOB_TASK_STATUS_DROPPING || status == JOB_TASK_STATUS_SUCCEED); } -int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) { +int32_t schUpdateJobStatus(SSchJob *pJob, int8_t newStatus) { int32_t code = 0; int8_t oriStatus = 0; @@ -175,7 +214,11 @@ int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) { oriStatus = SCH_GET_JOB_STATUS(pJob); if (oriStatus == newStatus) { - SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + if (newStatus == JOB_TASK_STATUS_DROPPING) { + SCH_ERR_JRET(TSDB_CODE_SCH_JOB_IS_DROPPING); + } + + SCH_ERR_JRET(TSDB_CODE_SCH_IGNORE_ERROR); } switch (oriStatus) { @@ -186,14 +229,13 @@ int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) { break; case JOB_TASK_STATUS_NOT_START: - if (newStatus != JOB_TASK_STATUS_EXECUTING) { + if (newStatus != JOB_TASK_STATUS_EXECUTING && newStatus != JOB_TASK_STATUS_DROPPING) { SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } break; case JOB_TASK_STATUS_EXECUTING: if (newStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED && newStatus != JOB_TASK_STATUS_FAILED && - newStatus != JOB_TASK_STATUS_CANCELLING && newStatus != JOB_TASK_STATUS_CANCELLED && newStatus != JOB_TASK_STATUS_DROPPING) { SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } @@ -208,13 +250,11 @@ int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) { break; case JOB_TASK_STATUS_SUCCEED: case JOB_TASK_STATUS_FAILED: - case JOB_TASK_STATUS_CANCELLING: if (newStatus != JOB_TASK_STATUS_DROPPING) { SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); } break; - case JOB_TASK_STATUS_CANCELLED: case JOB_TASK_STATUS_DROPPING: SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED); break; @@ -238,8 +278,67 @@ int32_t schChkUpdateJobStatus(SSchJob *pJob, int8_t newStatus) { _return: SCH_JOB_ELOG("invalid job status update, from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus)); - SCH_ERR_RET(code); + SCH_RET(code); +} + + +void schEndOperation(SSchJob *pJob) { + int32_t op = atomic_load_32(&pJob->opStatus.op); + if (SCH_OP_NULL == op) { + SCH_JOB_DLOG("job already not in any operation, status:%s", jobTaskStatusStr(pJob->status)); + return; + } + + atomic_store_32(&pJob->opStatus.op, SCH_OP_NULL); + + SCH_JOB_DLOG("job end %s operation", schGetOpStr(op)); +} + +int32_t schBeginOperation(SSchJob *pJob, SCH_OP_TYPE type, bool sync) { + int32_t code = 0; + int8_t status = 0; + + if (schJobNeedToStop(pJob, &status)) { + SCH_JOB_ELOG("abort op %s cause of job need to stop", schGetOpStr(type)); + SCH_ERR_JRET(pJob->errCode); + } + + if (SCH_OP_NULL != atomic_val_compare_exchange_32(&pJob->opStatus.op, SCH_OP_NULL, type)) { + SCH_JOB_ELOG("job already in %s operation", schGetOpStr(pJob->opStatus.op)); + SCH_ERR_JRET(TSDB_CODE_TSC_APP_ERROR); + } + + SCH_JOB_DLOG("job start %s operation", schGetOpStr(pJob->opStatus.op)); + + pJob->opStatus.sync = sync; + + switch (type) { + case SCH_OP_EXEC: + SCH_ERR_JRET(schUpdateJobStatus(pJob, JOB_TASK_STATUS_EXECUTING)); + break; + case SCH_OP_FETCH: + if 
(!SCH_JOB_NEED_FETCH(pJob)) { + SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob)); + SCH_ERR_JRET(TSDB_CODE_QRY_APP_ERROR); + } + + if (status != JOB_TASK_STATUS_PARTIAL_SUCCEED) { + SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status)); + SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); + } + break; + default: + SCH_JOB_ELOG("unknown operation type %d", type); + SCH_ERR_JRET(TSDB_CODE_TSC_APP_ERROR); + } + return TSDB_CODE_SUCCESS; + +_return: + + schEndOperation(pJob); + + SCH_RET(code); } int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) { @@ -278,7 +377,7 @@ int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) { SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); } - SCH_TASK_DLOG("children info, the %d child TID %" PRIx64, n, (*childTask)->taskId); + SCH_TASK_DLOG("children info, the %d child TID 0x%" PRIx64, n, (*childTask)->taskId); } if (parentNum > 0) { @@ -312,7 +411,7 @@ int32_t schBuildTaskRalation(SSchJob *pJob, SHashObj *planToTask) { SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); } - SCH_TASK_DLOG("parents info, the %d parent TID %" PRIx64, n, (*parentTask)->taskId); + SCH_TASK_DLOG("parents info, the %d parent TID 0x%" PRIx64, n, (*parentTask)->taskId); } SCH_TASK_DLOG("level:%d, parentNum:%d, childNum:%d", i, parentNum, childNum); @@ -785,37 +884,6 @@ int32_t schHandleTaskRetry(SSchJob *pJob, SSchTask *pTask) { return TSDB_CODE_SUCCESS; } -void schUpdateJobErrCode(SSchJob *pJob, int32_t errCode) { - if (TSDB_CODE_SUCCESS == errCode) { - return; - } - - int32_t origCode = atomic_load_32(&pJob->errCode); - if (TSDB_CODE_SUCCESS == origCode) { - if (origCode == atomic_val_compare_exchange_32(&pJob->errCode, origCode, errCode)) { - goto _return; - } - - origCode = atomic_load_32(&pJob->errCode); - } - - if (NEED_CLIENT_HANDLE_ERROR(origCode)) { - return; - } - - if (NEED_CLIENT_HANDLE_ERROR(errCode)) { - atomic_store_32(&pJob->errCode, errCode); - goto _return; - } - - return; - -_return: - - SCH_JOB_DLOG("job errCode updated to %x - %s", errCode, tstrerror(errCode)); -} - - int32_t schSetJobQueryRes(SSchJob* pJob, SQueryResult* pRes) { pRes->code = atomic_load_32(&pJob->errCode); pRes->numOfRows = pJob->resNumOfRows; @@ -828,7 +896,7 @@ int32_t schSetJobQueryRes(SSchJob* pJob, SQueryResult* pRes) { int32_t schSetJobFetchRes(SSchJob* pJob, void** pData) { int32_t code = 0; if (pJob->resData && ((SRetrieveTableRsp *)pJob->resData)->completed) { - SCH_ERR_RET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_SUCCEED)); + SCH_ERR_RET(schUpdateJobStatus(pJob, JOB_TASK_STATUS_SUCCEED)); } while (true) { @@ -855,15 +923,17 @@ int32_t schSetJobFetchRes(SSchJob* pJob, void** pData) { return TSDB_CODE_SUCCESS; } -int32_t schNotifyUserQueryRes(SSchJob* pJob) { - pJob->userRes.queryRes = taosMemoryCalloc(1, sizeof(*pJob->userRes.queryRes)); - if (pJob->userRes.queryRes) { - schSetJobQueryRes(pJob, pJob->userRes.queryRes); +int32_t schNotifyUserExecRes(SSchJob* pJob) { + SQueryResult* pRes = taosMemoryCalloc(1, sizeof(SQueryResult)); + if (pRes) { + schSetJobQueryRes(pJob, pRes); } - (*pJob->userRes.execFp)(pJob->userRes.queryRes, pJob->userRes.userParam, atomic_load_32(&pJob->errCode)); + schEndOperation(pJob); - pJob->userRes.queryRes = NULL; + SCH_JOB_DLOG("sch start to invoke exec cb, code: %s", tstrerror(pJob->errCode)); + (*pJob->userRes.execFp)(pRes, pJob->userRes.userParam, atomic_load_32(&pJob->errCode)); + SCH_JOB_DLOG("sch end from query cb, code: %s", tstrerror(pJob->errCode)); return TSDB_CODE_SUCCESS; } @@ -871,36 
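`schBeginOperation`/`schEndOperation` above replace the old `userFetch`/`userCb` flags with a single atomic op slot: a caller starts an operation only by swinging the slot from NULL to its own type, which serializes exec and fetch. A minimal sketch of the claim/release pair:

```c
#include <stdatomic.h>
#include <stdio.h>

typedef enum { OP_NULL = 0, OP_EXEC, OP_FETCH } OpType;

static _Atomic int op_slot = OP_NULL;

static int begin_op(OpType t) {
  int expected = OP_NULL;
  /* returns nonzero only if we moved the slot from NULL to t */
  return atomic_compare_exchange_strong(&op_slot, &expected, t);
}

static void end_op(void) { atomic_store(&op_slot, OP_NULL); }

int main(void) {
  printf("exec claim: %s\n",                 begin_op(OP_EXEC)  ? "ok" : "busy");
  printf("fetch claim while exec runs: %s\n", begin_op(OP_FETCH) ? "ok" : "busy");
  end_op();
  printf("fetch claim after end: %s\n",       begin_op(OP_FETCH) ? "ok" : "busy");
  return 0;
}
```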
+941,52 @@ int32_t schNotifyUserQueryRes(SSchJob* pJob) { int32_t schNotifyUserFetchRes(SSchJob* pJob) { void* pRes = NULL; - SCH_ERR_RET(schSetJobFetchRes(pJob, &pRes)); + schSetJobFetchRes(pJob, &pRes); + schEndOperation(pJob); + + SCH_JOB_DLOG("sch start to invoke fetch cb, code: %s", tstrerror(pJob->errCode)); (*pJob->userRes.fetchFp)(pRes, pJob->userRes.userParam, atomic_load_32(&pJob->errCode)); + SCH_JOB_DLOG("sch end from fetch cb, code: %s", tstrerror(pJob->errCode)); return TSDB_CODE_SUCCESS; } +void schPostJobRes(SSchJob *pJob, SCH_OP_TYPE op) { + if (SCH_OP_NULL == pJob->opStatus.op) { + SCH_JOB_DLOG("job not in any op, no need to post job res, status:%s", jobTaskStatusStr(pJob->status)); + return; + } + + if (op && pJob->opStatus.op != op) { + SCH_JOB_ELOG("job in op %s mis-match with expected %s", schGetOpStr(pJob->opStatus.op), schGetOpStr(op)); + return; + } + + if (SCH_JOB_IN_SYNC_OP(pJob)) { + tsem_post(&pJob->rspSem); + } else if (SCH_JOB_IN_ASYNC_EXEC_OP(pJob)) { + schNotifyUserExecRes(pJob); + } else if (SCH_JOB_IN_ASYNC_FETCH_OP(pJob)) { + schNotifyUserFetchRes(pJob); + } else { + SCH_JOB_ELOG("job not in any operation, status:%s", jobTaskStatusStr(pJob->status)); + } +} + int32_t schProcessOnJobFailureImpl(SSchJob *pJob, int32_t status, int32_t errCode) { // if already FAILED, no more processing - SCH_ERR_RET(schChkUpdateJobStatus(pJob, status)); + SCH_ERR_RET(schUpdateJobStatus(pJob, status)); schUpdateJobErrCode(pJob, errCode); - - if (atomic_load_8(&pJob->userFetch) || pJob->attr.syncSchedule) { - tsem_post(&pJob->rspSem); - } - + int32_t code = atomic_load_32(&pJob->errCode); - - SCH_JOB_DLOG("job failed with error: %s", tstrerror(code)); - - if (!pJob->attr.syncSchedule) { - if (SCH_EXEC_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_EXEC_CB, 0)) { - schNotifyUserQueryRes(pJob); - } else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_FETCH_CB, 0)) { - atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0); - schNotifyUserFetchRes(pJob); - } + if (code) { + SCH_JOB_DLOG("job failed with error: %s", tstrerror(code)); } + schPostJobRes(pJob, 0); + SCH_RET(code); } @@ -918,20 +1004,9 @@ int32_t schProcessOnJobDropped(SSchJob *pJob, int32_t errCode) { int32_t schProcessOnJobPartialSuccess(SSchJob *pJob) { int32_t code = 0; - SCH_ERR_RET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_PARTIAL_SUCCEED)); + SCH_ERR_RET(schUpdateJobStatus(pJob, JOB_TASK_STATUS_PARTIAL_SUCCEED)); - if (pJob->attr.syncSchedule) { - tsem_post(&pJob->rspSem); - } else if (SCH_EXEC_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_EXEC_CB, 0)) { - schNotifyUserQueryRes(pJob); - } else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&pJob->userCb, SCH_FETCH_CB, 0)) { - atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0); - schNotifyUserFetchRes(pJob); - } - - if (atomic_load_8(&pJob->userFetch)) { - SCH_ERR_JRET(schFetchFromRemote(pJob)); - } + schPostJobRes(pJob, SCH_OP_EXEC); return TSDB_CODE_SUCCESS; @@ -940,16 +1015,8 @@ _return: SCH_RET(schProcessOnJobFailure(pJob, code)); } -void schProcessOnDataFetched(SSchJob *job) { - atomic_val_compare_exchange_32(&job->remoteFetch, 1, 0); - - if (job->attr.syncSchedule) { - tsem_post(&job->rspSem); - } else if (SCH_FETCH_CB == atomic_val_compare_exchange_32(&job->userCb, SCH_FETCH_CB, 0)) { - atomic_val_compare_exchange_8(&job->userFetch, 1, 0); - - schNotifyUserFetchRes(job); - } +void schProcessOnDataFetched(SSchJob *pJob) { + schPostJobRes(pJob, SCH_OP_FETCH); } // Note: no more task error processing, handled 
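`schPostJobRes` above funnels every completion through one dispatcher: post the semaphore for sync callers, or invoke the exec/fetch callback for async ones. A sketch of that dispatch, with printfs standing in for `tsem_post` and the user callbacks:

```c
#include <stdbool.h>
#include <stdio.h>

typedef enum { OP_NULL = 0, OP_EXEC, OP_FETCH } OpType;
typedef struct { OpType op; bool sync; } OpStatus;   /* stand-in for SSchOpStatus */

static void post_job_res(const OpStatus* s) {
  if (s->op == OP_NULL)      printf("no operation in flight, nothing to post\n");
  else if (s->sync)          printf("tsem_post: wake the blocked caller\n");
  else if (s->op == OP_EXEC) printf("invoke async exec callback\n");
  else                       printf("invoke async fetch callback\n");
}

int main(void) {
  OpStatus sync_exec   = { OP_EXEC,  true  };
  OpStatus async_fetch = { OP_FETCH, false };
  post_job_res(&sync_exec);
  post_job_res(&async_fetch);
  return 0;
}
```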
in function internal @@ -1109,7 +1176,7 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) { SCH_UNLOCK(SCH_WRITE, &parent->lock); if (SCH_TASK_READY_FOR_LAUNCH(readyNum, parent)) { - SCH_TASK_DLOG("all %d children task done, start to launch parent task %" PRIx64, readyNum, parent->taskId); + SCH_TASK_DLOG("all %d children task done, start to launch parent task 0x%" PRIx64, readyNum, parent->taskId); SCH_ERR_RET(schLaunchTask(pJob, parent)); } } @@ -1127,15 +1194,8 @@ _return: int32_t schFetchFromRemote(SSchJob *pJob) { int32_t code = 0; - if (atomic_val_compare_exchange_32(&pJob->remoteFetch, 0, 1) != 0) { - SCH_JOB_ELOG("prior fetching not finished, remoteFetch:%d", atomic_load_32(&pJob->remoteFetch)); - return TSDB_CODE_SUCCESS; - } - void *resData = atomic_load_ptr(&pJob->resData); if (resData) { - atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0); - SCH_JOB_DLOG("res already fetched, res:%p", resData); return TSDB_CODE_SUCCESS; } @@ -1146,8 +1206,6 @@ int32_t schFetchFromRemote(SSchJob *pJob) { _return: - atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0); - SCH_RET(schProcessOnTaskFailure(pJob, pJob->fetchTask, code)); } @@ -1291,7 +1349,7 @@ int32_t schGetTaskFromList(SHashObj *pTaskList, uint64_t taskId, SSchTask **pTas int32_t schGetTaskInJob(SSchJob *pJob, uint64_t taskId, SSchTask **pTask) { schGetTaskFromList(pJob->taskList, taskId, pTask); if (NULL == *pTask) { - SCH_JOB_ELOG("task not found in job task list, taskId:%" PRIx64, taskId); + SCH_JOB_ELOG("task not found in job task list, taskId:0x%" PRIx64, taskId); SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR); } @@ -1382,8 +1440,6 @@ int32_t schLaunchLevelTasks(SSchJob *pJob, SSchLevel *level) { int32_t schLaunchJob(SSchJob *pJob) { SSchLevel *level = taosArrayGet(pJob->levels, pJob->levelIdx); - SCH_ERR_RET(schChkUpdateJobStatus(pJob, JOB_TASK_STATUS_EXECUTING)); - SCH_ERR_RET(schChkJobNeedFlowCtrl(pJob, level)); SCH_ERR_RET(schLaunchLevelTasks(pJob, level)); @@ -1466,9 +1522,9 @@ void schFreeJobImpl(void *job) { taosMemoryFreeClear(pJob->userRes.queryRes); taosMemoryFreeClear(pJob->resData); - taosMemoryFreeClear(pJob); + taosMemoryFree(pJob); - qDebug("QID:0x%" PRIx64 " job freed, refId:%" PRIx64 ", pointer:%p", queryId, refId, pJob); + qDebug("QID:0x%" PRIx64 " sch job freed, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob); int32_t jobNum = atomic_sub_fetch_32(&schMgmt.jobNum, 1); if (jobNum == 0) { @@ -1483,26 +1539,36 @@ int32_t schExecJobImpl(SSchedulerReq *pReq, int64_t *job, SQueryResult* pRes, bo int32_t code = 0; SSchJob *pJob = NULL; - SCH_ERR_RET(schInitJob(pReq, &pJob, pRes, sync)); + SCH_ERR_JRET(schInitJob(pReq, &pJob, pRes, sync)); - qDebug("QID:0x%" PRIx64 " job refId 0x%"PRIx64 " started", pReq->pDag->queryId, pJob->refId); + qDebug("QID:0x%" PRIx64 " sch job refId 0x%"PRIx64 " started", pReq->pDag->queryId, pJob->refId); *job = pJob->refId; - if (!sync) { - pJob->userCb = SCH_EXEC_CB; - } + SCH_ERR_JRET(schBeginOperation(pJob, SCH_OP_EXEC, sync)); - SCH_ERR_JRET(schLaunchJob(pJob)); + code = schLaunchJob(pJob); if (sync) { SCH_JOB_DLOG("will wait for rsp now, job status:%s", SCH_GET_JOB_STATUS_STR(pJob)); tsem_wait(&pJob->rspSem); + + schEndOperation(pJob); + } else if (code) { + schPostJobRes(pJob, SCH_OP_EXEC); } - SCH_JOB_DLOG("job exec done, job status:%s, jobId:0x%"PRIx64, SCH_GET_JOB_STATUS_STR(pJob), pJob->refId); + SCH_JOB_DLOG("job exec done, job status:%s, jobId:0x%" PRIx64, SCH_GET_JOB_STATUS_STR(pJob), pJob->refId); + + schReleaseJob(pJob->refId); + + 
SCH_RET(code); _return: + if (!sync) { + pReq->fp(NULL, pReq->cbParam, code); + } + schReleaseJob(pJob->refId); SCH_RET(code); @@ -1536,10 +1602,10 @@ int32_t schAsyncExecJob(SSchedulerReq *pReq, int64_t *pJob) { *pJob = 0; if (EXPLAIN_MODE_STATIC == pReq->pDag->explainInfo.mode) { - SCH_ERR_RET(schExecStaticExplainJob(pReq, pJob, false)); - } else { - SCH_ERR_RET(schExecJobImpl(pReq, pJob, NULL, false)); + SCH_RET(schExecStaticExplainJob(pReq, pJob, false)); } + + SCH_ERR_RET(schExecJobImpl(pReq, pJob, NULL, false)); return code; } @@ -1550,19 +1616,29 @@ int32_t schExecStaticExplainJob(SSchedulerReq *pReq, int64_t *job, bool sync) { int32_t code = 0; SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob)); if (NULL == pJob) { - qError("QID:%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob)); - SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); + qError("QID:0x%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob)); + code = TSDB_CODE_QRY_OUT_OF_MEMORY; + pReq->fp(NULL, pReq->cbParam, code); + SCH_ERR_RET(code); } pJob->sql = pReq->sql; + pJob->reqKilled = pReq->reqKilled; pJob->attr.queryJob = true; - pJob->attr.syncSchedule = sync; pJob->attr.explainMode = pReq->pDag->explainInfo.mode; pJob->queryId = pReq->pDag->queryId; pJob->subPlans = pReq->pDag->pSubplans; pJob->userRes.execFp = pReq->fp; pJob->userRes.userParam = pReq->cbParam; + schUpdateJobStatus(pJob, JOB_TASK_STATUS_NOT_START); + + code = schBeginOperation(pJob, SCH_OP_EXEC, sync); + if (code) { + pReq->fp(NULL, pReq->cbParam, code); + SCH_ERR_RET(code); + } + SCH_ERR_JRET(qExecStaticExplain(pReq->pDag, (SRetrieveTableRsp **)&pJob->resData)); int64_t refId = taosAddRef(schMgmt.jobRef, pJob); @@ -1572,21 +1648,23 @@ int32_t schExecStaticExplainJob(SSchedulerReq *pReq, int64_t *job, bool sync) { } if (NULL == schAcquireJob(refId)) { - SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); + SCH_JOB_ELOG("schAcquireJob job failed, refId:0x%" PRIx64, refId); + SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); } pJob->refId = refId; - SCH_JOB_DLOG("job refId:%" PRIx64, pJob->refId); + SCH_JOB_DLOG("job refId:0x%" PRIx64, pJob->refId); pJob->status = JOB_TASK_STATUS_PARTIAL_SUCCEED; *job = pJob->refId; SCH_JOB_DLOG("job exec done, job status:%s", SCH_GET_JOB_STATUS_STR(pJob)); - - if (!pJob->attr.syncSchedule) { - code = schNotifyUserQueryRes(pJob); + + if (!sync) { + schPostJobRes(pJob, SCH_OP_EXEC); + } else { + schEndOperation(pJob); } schReleaseJob(pJob->refId); @@ -1595,56 +1673,29 @@ int32_t schExecStaticExplainJob(SSchedulerReq *pReq, int64_t *job, bool sync) { _return: + schEndOperation(pJob); + if (!sync) { + pReq->fp(NULL, pReq->cbParam, code); + } + schFreeJobImpl(pJob); + SCH_RET(code); } int32_t schFetchRows(SSchJob *pJob) { int32_t code = 0; - int8_t status = SCH_GET_JOB_STATUS(pJob); - if (status == JOB_TASK_STATUS_DROPPING) { - SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - if (!SCH_JOB_NEED_FETCH(pJob)) { - SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob)); - SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); - } - - if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) { - SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch)); - SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); - } - - if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) { - SCH_JOB_ELOG("job failed or dropping, status:%s", 
jobTaskStatusStr(status)); - SCH_ERR_JRET(atomic_load_32(&pJob->errCode)); - } else if (status == JOB_TASK_STATUS_SUCCEED) { - SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status)); - goto _return; - } else if (status != JOB_TASK_STATUS_PARTIAL_SUCCEED) { - SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status)); - SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); - } - if (!(pJob->attr.explainMode == EXPLAIN_MODE_STATIC)) { SCH_ERR_JRET(schFetchFromRemote(pJob)); tsem_wait(&pJob->rspSem); - - status = SCH_GET_JOB_STATUS(pJob); - if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) { - SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status)); - SCH_ERR_JRET(atomic_load_32(&pJob->errCode)); - } } SCH_ERR_JRET(schSetJobFetchRes(pJob, pJob->userRes.fetchRes)); _return: - atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0); + schEndOperation(pJob); SCH_RET(code); } @@ -1652,50 +1703,14 @@ _return: int32_t schAsyncFetchRows(SSchJob *pJob) { int32_t code = 0; - int8_t status = SCH_GET_JOB_STATUS(pJob); - if (status == JOB_TASK_STATUS_DROPPING) { - SCH_JOB_ELOG("job is dropping, status:%s", jobTaskStatusStr(status)); - SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); - } - - if (!SCH_JOB_NEED_FETCH(pJob)) { - SCH_JOB_ELOG("no need to fetch data, status:%s", SCH_GET_JOB_STATUS_STR(pJob)); - SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); - } - - if (atomic_val_compare_exchange_8(&pJob->userFetch, 0, 1) != 0) { - SCH_JOB_ELOG("prior fetching not finished, userFetch:%d", atomic_load_8(&pJob->userFetch)); - SCH_ERR_RET(TSDB_CODE_QRY_APP_ERROR); - } - - if (JOB_TASK_STATUS_FAILED == status || JOB_TASK_STATUS_DROPPING == status) { - SCH_JOB_ELOG("job failed or dropping, status:%s", jobTaskStatusStr(status)); - SCH_ERR_JRET(atomic_load_32(&pJob->errCode)); - } else if (status == JOB_TASK_STATUS_SUCCEED) { - SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status)); - goto _return; - } else if (status != JOB_TASK_STATUS_PARTIAL_SUCCEED) { - SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status)); - SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); - } - if (pJob->attr.explainMode == EXPLAIN_MODE_STATIC) { - atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0); - - SCH_ERR_JRET(schNotifyUserFetchRes(pJob)); - } else { - pJob->userCb = SCH_FETCH_CB; - - SCH_ERR_JRET(schFetchFromRemote(pJob)); + schPostJobRes(pJob, SCH_OP_FETCH); + return TSDB_CODE_SUCCESS; } + + SCH_ERR_RET(schFetchFromRemote(pJob)); return TSDB_CODE_SUCCESS; - -_return: - - atomic_val_compare_exchange_8(&pJob->userFetch, 1, 0); - - SCH_RET(code); } diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index ddcb53d62f..0bd747785c 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -315,8 +315,6 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch return TSDB_CODE_SUCCESS; } - atomic_val_compare_exchange_32(&pJob->remoteFetch, 1, 0); - SCH_ERR_JRET(schFetchFromRemote(pJob)); taosMemoryFreeClear(msg); @@ -346,7 +344,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch } case TDMT_VND_DROP_TASK_RSP: { // SHOULD NEVER REACH HERE - SCH_TASK_ELOG("invalid status to handle drop task rsp, refId:%" PRIx64, pJob->refId); + SCH_TASK_ELOG("invalid status to handle drop task rsp, refId:0x%" PRIx64, pJob->refId); SCH_ERR_JRET(TSDB_CODE_SCH_INTERNAL_ERROR); break; } @@ -376,7 +374,7 @@ int32_t schHandleCallback(void 
*param, const SDataBuf *pMsg, int32_t msgType, in
   SSchJob *pJob = schAcquireJob(pParam->refId);
   if (NULL == pJob) {
-    qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 "taosAcquireRef job failed, may be dropped, refId:%" PRIx64,
+    qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " taosAcquireRef job failed, may be dropped, refId:0x%" PRIx64,
           pParam->queryId, pParam->taskId, pParam->refId);
     SCH_ERR_JRET(TSDB_CODE_QRY_JOB_FREED);
   }
@@ -445,7 +443,7 @@ int32_t schHandleExplainCallback(void *param, const SDataBuf *pMsg, int32_t code
 int32_t schHandleDropCallback(void *param, const SDataBuf *pMsg, int32_t code) {
   SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param;
-  qDebug("QID:%" PRIx64 ",TID:%" PRIx64 " drop task rsp received, code:%x", pParam->queryId, pParam->taskId, code);
+  qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x", pParam->queryId, pParam->taskId, code);
   taosMemoryFreeClear(param);
   return TSDB_CODE_SUCCESS;
 }
diff --git a/source/libs/scheduler/src/schUtil.c b/source/libs/scheduler/src/schUtil.c
index 66483187da..73077cbf0f 100644
--- a/source/libs/scheduler/src/schUtil.c
+++ b/source/libs/scheduler/src/schUtil.c
@@ -21,6 +21,18 @@
 #include "tref.h"
 #include "trpc.h"
 
+char* schGetOpStr(SCH_OP_TYPE type) {
+  switch (type) {
+    case SCH_OP_NULL:
+      return "NULL";
+    case SCH_OP_EXEC:
+      return "EXEC";
+    case SCH_OP_FETCH:
+      return "FETCH";
+    default:
+      return "UNKNOWN";
+  }
+}
 
 void schCleanClusterHb(void* pTrans) {
   SCH_LOCK(SCH_WRITE, &schMgmt.hbLock);
@@ -188,7 +200,7 @@ int32_t schUpdateHbConnection(SQueryNodeEpId *epId, SSchTrans *trans) {
   SCH_UNLOCK(SCH_WRITE, &hb->lock);
   SCH_UNLOCK(SCH_READ, &schMgmt.hbLock);
 
-  qDebug("hb connection updated, sId:%" PRIx64 ", nodeId:%d, fqdn:%s, port:%d, pTrans:%p, pHandle:%p", schMgmt.sId,
+  qDebug("hb connection updated, sId:0x%" PRIx64 ", nodeId:%d, fqdn:%s, port:%d, pTrans:%p, pHandle:%p", schMgmt.sId,
          epId->nodeId, epId->ep.fqdn, epId->ep.port, trans->pTrans, trans->pHandle);
 
   return TSDB_CODE_SUCCESS;
diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c
index 0eaeeae9cb..57a405ffa3 100644
--- a/source/libs/scheduler/src/scheduler.c
+++ b/source/libs/scheduler/src/scheduler.c
@@ -62,12 +62,14 @@ int32_t schedulerInit(SSchedulerCfg *cfg) {
     SCH_ERR_RET(TSDB_CODE_QRY_SYS_ERROR);
   }
 
-  qInfo("scheduler %" PRIx64 " initizlized, maxJob:%u", schMgmt.sId, schMgmt.cfg.maxJobNum);
+  qInfo("scheduler 0x%" PRIx64 " initialized, maxJob:%u", schMgmt.sId, schMgmt.cfg.maxJobNum);
   return TSDB_CODE_SUCCESS;
 }
 
 int32_t schedulerExecJob(SSchedulerReq *pReq, int64_t *pJob, SQueryResult *pRes) {
+  qDebug("scheduler sync exec job start");
+
   if (NULL == pReq || NULL == pJob || NULL == pRes) {
     SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
   }
@@ -76,21 +78,27 @@
 }
 
 int32_t schedulerAsyncExecJob(SSchedulerReq *pReq, int64_t *pJob) {
+  qDebug("scheduler async exec job start");
+
   int32_t code = 0;
   if (NULL == pReq || NULL == pJob) {
-    code = TSDB_CODE_QRY_INVALID_INPUT;
-  } else {
-    code = schAsyncExecJob(pReq, pJob);
+    SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
   }
+
+  schAsyncExecJob(pReq, pJob);
+
+_return:
 
   if (code != TSDB_CODE_SUCCESS) {
     pReq->fp(NULL, pReq->cbParam, code);
   }
 
-  return code;
+  SCH_RET(code);
 }
 
 int32_t schedulerFetchRows(int64_t job, void **pData) {
+  qDebug("scheduler sync fetch rows start");
+
   if (NULL == pData) {
     SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
   }
@@ -102,7 +110,8 @@ int32_t schedulerFetchRows(int64_t job, void
**pData) { SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); } - pJob->attr.syncSchedule = true; + SCH_ERR_RET(schBeginOperation(pJob, SCH_OP_FETCH, true)); + pJob->userRes.fetchRes = pData; code = schFetchRows(pJob); @@ -112,24 +121,32 @@ int32_t schedulerFetchRows(int64_t job, void **pData) { } void schedulerAsyncFetchRows(int64_t job, schedulerFetchCallback fp, void* param) { + qDebug("scheduler async fetch rows start"); + + int32_t code = 0; if (NULL == fp || NULL == param) { - fp(NULL, param, TSDB_CODE_QRY_INVALID_INPUT); - return; + SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT); } SSchJob *pJob = schAcquireJob(job); if (NULL == pJob) { - qError("acquire job from jobRef list failed, may be dropped, jobId:0x%" PRIx64, job); - fp(NULL, param, TSDB_CODE_SCH_STATUS_ERROR); - return; + qError("acquire sch job from job list failed, may be dropped, jobId:0x%" PRIx64, job); + SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); } - pJob->attr.syncSchedule = false; + SCH_ERR_JRET(schBeginOperation(pJob, SCH_OP_FETCH, false)); + pJob->userRes.fetchFp = fp; pJob->userRes.userParam = param; - /*code = */schAsyncFetchRows(pJob); + SCH_ERR_JRET(schAsyncFetchRows(pJob)); +_return: + + if (code) { + fp(NULL, param, code); + } + schReleaseJob(job); } @@ -137,12 +154,12 @@ int32_t schedulerGetTasksStatus(int64_t job, SArray *pSub) { int32_t code = 0; SSchJob *pJob = schAcquireJob(job); if (NULL == pJob) { - qDebug("acquire job from jobRef list failed, may not started or dropped, refId:%" PRIx64, job); + qDebug("acquire job from jobRef list failed, may not started or dropped, refId:0x%" PRIx64, job); SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR); } if (pJob->status < JOB_TASK_STATUS_NOT_START || pJob->levelNum <= 0 || NULL == pJob->levels) { - qDebug("job not initialized or not executable job, refId:%" PRIx64, job); + qDebug("job not initialized or not executable job, refId:0x%" PRIx64, job); SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR); } @@ -188,21 +205,23 @@ void schedulerStopQueryHb(void *pTrans) { schCleanClusterHb(pTrans); } -void schedulerFreeJob(int64_t job) { +void schedulerFreeJob(int64_t job, int32_t errCode) { SSchJob *pJob = schAcquireJob(job); if (NULL == pJob) { qError("acquire job from jobRef list failed, may be dropped, jobId:0x%" PRIx64, job); return; } - if (atomic_load_8(&pJob->userFetch) > 0) { - schProcessOnJobDropped(pJob, TSDB_CODE_QRY_JOB_FREED); + int32_t code = schProcessOnJobDropped(pJob, errCode); + if (TSDB_CODE_SCH_JOB_IS_DROPPING == code) { + SCH_JOB_DLOG("sch job is already dropping, refId:0x%" PRIx64, job); + return; } - SCH_JOB_DLOG("start to remove job from jobRef list, refId:%" PRIx64, job); + SCH_JOB_DLOG("start to remove job from jobRef list, refId:0x%" PRIx64, job); if (taosRemoveRef(schMgmt.jobRef, job)) { - SCH_JOB_ELOG("remove job from job list failed, refId:%" PRIx64, job); + SCH_JOB_ELOG("remove job from job list failed, refId:0x%" PRIx64, job); } schReleaseJob(job); diff --git a/source/libs/scheduler/test/schedulerTests.cpp b/source/libs/scheduler/test/schedulerTests.cpp index 9b624ee5cd..e5cc3cd481 100644 --- a/source/libs/scheduler/test/schedulerTests.cpp +++ b/source/libs/scheduler/test/schedulerTests.cpp @@ -457,7 +457,7 @@ void schtFreeQueryJob(int32_t freeThread) { int64_t job = queryJobRefId; if (job && atomic_val_compare_exchange_64(&queryJobRefId, job, 0)) { - schedulerFreeJob(job); + schedulerFreeJob(job, 0); if (freeThread) { if (++freeNum % schtTestPrintNum == 0) { printf("FreeNum:%d\n", freeNum); @@ -724,7 +724,7 @@ TEST(queryTest, normalCase) { schReleaseJob(job); - 
schedulerFreeJob(job); + schedulerFreeJob(job, 0); schtFreeQueryDag(&dag); @@ -828,7 +828,7 @@ TEST(queryTest, readyFirstCase) { schReleaseJob(job); - schedulerFreeJob(job); + schedulerFreeJob(job, 0); schtFreeQueryDag(&dag); @@ -940,7 +940,7 @@ TEST(queryTest, flowCtrlCase) { schReleaseJob(job); - schedulerFreeJob(job); + schedulerFreeJob(job, 0); schtFreeQueryDag(&dag); @@ -994,7 +994,7 @@ TEST(insertTest, normalCase) { ASSERT_EQ(code, 0); ASSERT_EQ(res.numOfRows, 20); - schedulerFreeJob(insertJobRefId); + schedulerFreeJob(insertJobRefId, 0); schedulerDestroy(); } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 59ec2b5ceb..ca10e7d956 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -134,6 +134,7 @@ int32_t streamBuildDispatchMsg(SStreamTask* pTask, SStreamDataBlock* data, SRpcM int32_t sz = taosArrayGetSize(vgInfo); for (int32_t i = 0; i < sz; i++) { SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); + ASSERT(pVgInfo->vgId > 0); if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) { vgId = pVgInfo->vgId; downstreamTaskId = pVgInfo->taskId; diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 7a7d9d15ad..a35e7679a1 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -70,7 +70,7 @@ int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { if (tEncodeSEpSet(pEncoder, &pTask->fixedEpDispatcher.epSet) < 0) return -1; } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) { if (tSerializeSUseDbRspImp(pEncoder, &pTask->shuffleDispatcher.dbInfo) < 0) return -1; - /*if (tEncodeI8(pEncoder, pTask->shuffleDispatcher.hashMethod) < 0) return -1;*/ + if (tEncodeCStr(pEncoder, pTask->shuffleDispatcher.stbFullName) < 0) return -1; } if (tEncodeI64(pEncoder, pTask->triggerParam) < 0) return -1; @@ -119,8 +119,8 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { if (tDecodeI32(pDecoder, &pTask->fixedEpDispatcher.nodeId) < 0) return -1; if (tDecodeSEpSet(pDecoder, &pTask->fixedEpDispatcher.epSet) < 0) return -1; } else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) { - /*if (tDecodeI8(pDecoder, &pTask->shuffleDispatcher.hashMethod) < 0) return -1;*/ if (tDeserializeSUseDbRspImp(pDecoder, &pTask->shuffleDispatcher.dbInfo) < 0) return -1; + if (tDecodeCStrTo(pDecoder, pTask->shuffleDispatcher.stbFullName) < 0) return -1; } if (tDecodeI64(pDecoder, &pTask->triggerParam) < 0) return -1; diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index b00c7cbda1..b6225c79cd 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -171,7 +171,8 @@ void syncNodeClose(SSyncNode* pSyncNode); int32_t syncNodePropose(SSyncNode* pSyncNode, const SRpcMsg* pMsg, bool isWeak); // option -bool syncNodeSnapshotEnable(SSyncNode* pSyncNode); +bool syncNodeSnapshotEnable(SSyncNode* pSyncNode); +SyncIndex syncNodeGetSnapshotConfigIndex(SSyncNode* pSyncNode, SyncIndex snapshotLastApplyIndex); // ping -------------- int32_t syncNodePing(SSyncNode* pSyncNode, const SRaftId* destRaftId, SyncPing* pMsg); diff --git a/source/libs/sync/inc/syncRaftCfg.h b/source/libs/sync/inc/syncRaftCfg.h index cd64402738..9969a0b974 100644 --- a/source/libs/sync/inc/syncRaftCfg.h +++ b/source/libs/sync/inc/syncRaftCfg.h @@ -47,14 +47,15 @@ typedef struct SRaftCfg { SRaftCfg *raftCfgOpen(const char *path); int32_t raftCfgClose(SRaftCfg *pRaftCfg); int32_t 
raftCfgPersist(SRaftCfg *pRaftCfg); +int32_t raftCfgAddConfigIndex(SRaftCfg *pRaftCfg, SyncIndex configIndex); -cJSON * syncCfg2Json(SSyncCfg *pSyncCfg); -char * syncCfg2Str(SSyncCfg *pSyncCfg); +cJSON *syncCfg2Json(SSyncCfg *pSyncCfg); +char *syncCfg2Str(SSyncCfg *pSyncCfg); int32_t syncCfgFromJson(const cJSON *pRoot, SSyncCfg *pSyncCfg); int32_t syncCfgFromStr(const char *s, SSyncCfg *pSyncCfg); -cJSON * raftCfg2Json(SRaftCfg *pRaftCfg); -char * raftCfg2Str(SRaftCfg *pRaftCfg); +cJSON *raftCfg2Json(SRaftCfg *pRaftCfg); +char *raftCfg2Str(SRaftCfg *pRaftCfg); int32_t raftCfgFromJson(const cJSON *pRoot, SRaftCfg *pRaftCfg); int32_t raftCfgFromStr(const char *s, SRaftCfg *pRaftCfg); diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 7b2f79e24c..2c17d0f3ed 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -208,8 +208,9 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { SRpcMsg rpcMsg; syncEntry2OriginalRpc(pRollBackEntry, &rpcMsg); - SFsmCbMeta cbMeta; + SFsmCbMeta cbMeta = {0}; cbMeta.index = pRollBackEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); cbMeta.isWeak = pRollBackEntry->isWeak; cbMeta.code = 0; cbMeta.state = ths->state; @@ -234,8 +235,9 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { if (ths->pFsm != NULL) { // if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_SYNC_NOOP) { if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pAppendEntry->originalRpcType)) { - SFsmCbMeta cbMeta; + SFsmCbMeta cbMeta = {0}; cbMeta.index = pAppendEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); cbMeta.isWeak = pAppendEntry->isWeak; cbMeta.code = 2; cbMeta.state = ths->state; @@ -266,8 +268,9 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) { if (ths->pFsm != NULL) { // if (ths->pFsm->FpPreCommitCb != NULL && pAppendEntry->originalRpcType != TDMT_SYNC_NOOP) { if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pAppendEntry->originalRpcType)) { - SFsmCbMeta cbMeta; + SFsmCbMeta cbMeta = {0}; cbMeta.index = pAppendEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); cbMeta.isWeak = pAppendEntry->isWeak; cbMeta.code = 3; cbMeta.state = ths->state; @@ -696,8 +699,9 @@ static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) { SRpcMsg rpcMsg; syncEntry2OriginalRpc(pRollBackEntry, &rpcMsg); - SFsmCbMeta cbMeta; + SFsmCbMeta cbMeta = {0}; cbMeta.index = pRollBackEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); cbMeta.isWeak = pRollBackEntry->isWeak; cbMeta.code = 0; cbMeta.state = ths->state; @@ -713,7 +717,7 @@ static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) { // delete confict entries code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin); ASSERT(code == 0); - sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu log truncate, from %ld to %ld", ths->vgId, + sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu log truncate, from %ld to %ld", ths->vgId, syncUtilState2String(ths->state), ths->commitIndex, ths->pRaftStore->currentTerm, delBegin, delEnd); logStoreSimpleLog2("after syncNodeMakeLogSame", ths->pLogStore); @@ -725,8 +729,9 @@ static int32_t syncNodePreCommit(SSyncNode* ths, SSyncRaftEntry* pEntry) { 
syncEntry2OriginalRpc(pEntry, &rpcMsg); if (ths->pFsm != NULL) { if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) { - SFsmCbMeta cbMeta; + SFsmCbMeta cbMeta = {0}; cbMeta.index = pEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); cbMeta.isWeak = pEntry->isWeak; cbMeta.code = 2; cbMeta.state = ths->state; @@ -1062,7 +1067,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs ths->commitIndex = snapshot.lastApplyIndex; sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu commit by snapshot from index:%ld to index:%ld", + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu commit by snapshot from index:%ld to index:%ld", ths->vgId, syncUtilState2String(ths->state), ths->commitIndex, ths->pRaftStore->currentTerm, commitBegin, commitEnd); } diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index f934f9a268..eabd14bf28 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -190,7 +190,7 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries if (gRaftDetailLog) { char* s = snapshotSender2Str(pSender); sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d start sender first time, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d start sender first time, " "lastApplyIndex:%ld " "lastApplyTerm:%lu " "lastConfigIndex:%ld privateTerm:%lu " @@ -201,7 +201,7 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries taosMemoryFree(s); } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d start sender first time, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d start sender first time, " "lastApplyIndex:%ld " "lastApplyTerm:%lu lastConfigIndex:%ld privateTerm:%lu", ths->vgId, syncUtilState2String(ths->state), ths->commitIndex, ths->pRaftStore->currentTerm, host, port, diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index d010728c78..d563e5aacc 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -56,7 +56,7 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { SyncIndex commitEnd = snapshot.lastApplyIndex; pSyncNode->commitIndex = snapshot.lastApplyIndex; - sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu commit by snapshot from index:%ld to index:%ld", + sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu commit by snapshot from index:%ld to index:%ld", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex, pSyncNode->pRaftStore->currentTerm, pSyncNode->commitIndex, snapshot.lastApplyIndex); } diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 3e5dcfea42..5abe98b425 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -414,7 +414,7 @@ int32_t syncGetSnapshotMeta(int64_t rid, struct SSnapshotMeta* sMeta) { assert(rid == pSyncNode->rid); sMeta->lastConfigIndex = pSyncNode->pRaftCfg->lastConfigIndex; - sTrace("sync get snapshot meta: lastConfigIndex:%ld", pSyncNode->pRaftCfg->lastConfigIndex); + sTrace("vgId:%d, get snapshot meta, lastConfigIndex:%" PRId64, pSyncNode->vgId, pSyncNode->pRaftCfg->lastConfigIndex); taosReleaseRef(tsNodeRefId, pSyncNode->rid); return 0; @@ -437,12 
+437,28 @@ int32_t syncGetSnapshotMetaByIndex(int64_t rid, SyncIndex snapshotIndex, struct } } sMeta->lastConfigIndex = lastIndex; - sTrace("sync get snapshot meta by index:%ld lastConfigIndex:%ld", snapshotIndex, sMeta->lastConfigIndex); + sTrace("vgId:%d, get snapshot meta by index:%" PRId64 " lastConfigIndex:%" PRId64, pSyncNode->vgId, snapshotIndex, + sMeta->lastConfigIndex); taosReleaseRef(tsNodeRefId, pSyncNode->rid); return 0; } +SyncIndex syncNodeGetSnapshotConfigIndex(SSyncNode* pSyncNode, SyncIndex snapshotLastApplyIndex) { + ASSERT(pSyncNode->pRaftCfg->configIndexCount >= 1); + SyncIndex lastIndex = (pSyncNode->pRaftCfg->configIndexArr)[0]; + + for (int i = 0; i < pSyncNode->pRaftCfg->configIndexCount; ++i) { + if ((pSyncNode->pRaftCfg->configIndexArr)[i] > lastIndex && + (pSyncNode->pRaftCfg->configIndexArr)[i] <= snapshotLastApplyIndex) { + lastIndex = (pSyncNode->pRaftCfg->configIndexArr)[i]; + } + } + + sTrace("sync syncNodeGetSnapshotConfigIndex index:%ld lastConfigIndex:%ld", snapshotLastApplyIndex, lastIndex); + return lastIndex; +} + const char* syncGetMyRoleStr(int64_t rid) { const char* s = syncUtilState2String(syncGetMyRole(rid)); return s; @@ -598,7 +614,7 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) { return -1; } assert(rid == pSyncNode->rid); - sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu propose msgType:%s,%d", pSyncNode->vgId, + sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu propose msgType:%s,%d", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex, pSyncNode->pRaftStore->currentTerm, TMSG_INFO(pMsg->msgType), pMsg->msgType); ret = syncNodePropose(pSyncNode, pMsg, isWeak); @@ -609,7 +625,7 @@ int32_t syncPropose(int64_t rid, const SRpcMsg* pMsg, bool isWeak) { int32_t syncNodePropose(SSyncNode* pSyncNode, const SRpcMsg* pMsg, bool isWeak) { int32_t ret = 0; - sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu propose msgType:%s,%d", pSyncNode->vgId, + sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu propose msgType:%s,%d", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex, pSyncNode->pRaftStore->currentTerm, TMSG_INFO(pMsg->msgType), pMsg->msgType); @@ -857,7 +873,7 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) { // snapshot meta // pSyncNode->sMeta.lastConfigIndex = -1; - sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu sync open", pSyncNode->vgId, + sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu sync open", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex, pSyncNode->pRaftStore->currentTerm); return pSyncNode; @@ -905,7 +921,7 @@ void syncNodeStartStandBy(SSyncNode* pSyncNode) { } void syncNodeClose(SSyncNode* pSyncNode) { - sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu sync close", pSyncNode->vgId, + sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu sync close", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex, pSyncNode->pRaftStore->currentTerm); int32_t ret; @@ -1294,7 +1310,7 @@ char* syncNode2SimpleStr(const SSyncNode* pSyncNode) { int len = 256; char* s = (char*)taosMemoryMalloc(len); snprintf(s, len, - "syncNode: vgId:%d currentTerm:%lu, commitIndex:%ld, state:%d %s, isStandBy:%d, " + "syncNode: vgId:%d, currentTerm:%lu, commitIndex:%ld, state:%d %s, isStandBy:%d, " "electTimerLogicClock:%lu, " "electTimerLogicClockUser:%lu, " "electTimerMS:%d, replicaNum:%d", @@ -1345,7 +1361,7 @@ 
void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncIndex
   SSyncSnapshotSender* oldSenders[TSDB_MAX_REPLICA];
   for (int i = 0; i < TSDB_MAX_REPLICA; ++i) {
     oldSenders[i] = (pSyncNode->senders)[i];
-    sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu save senders %d, %p, privateTerm:%lu",
+    sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu save senders %d, %p, privateTerm:%lu",
            pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex,
            pSyncNode->pRaftStore->currentTerm, i, oldSenders[i], oldSenders[i]->privateTerm);
     if (gRaftDetailLog) {
@@ -1400,7 +1416,7 @@ void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncIndex
       uint16_t port;
       syncUtilU642Addr((pSyncNode->replicasId)[i].addr, host, sizeof(host), &port);
       sDebug(
-          "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu reset sender for %lu, newIndex:%d, %s:%d, %p, "
+          "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu reset sender for %lu, newIndex:%d, %s:%d, %p, "
           "privateTerm:%lu",
           pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex,
           pSyncNode->pRaftStore->currentTerm, (pSyncNode->replicasId)[i].addr, i, host, port, oldSenders[j],
@@ -1413,7 +1429,7 @@ void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncIndex
       int32_t oldreplicaIndex = (pSyncNode->senders)[i]->replicaIndex;
       (pSyncNode->senders)[i]->replicaIndex = i;
       sDebug(
-          "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu udpate replicaIndex from %d to %d, %s:%d, %p, "
+          "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu update replicaIndex from %d to %d, %s:%d, %p, "
          "reset:%d",
           pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex,
           pSyncNode->pRaftStore->currentTerm, oldreplicaIndex, i, host, port, (pSyncNode->senders)[i], reset);
@@ -1426,7 +1442,7 @@ void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncIndex
     if ((pSyncNode->senders)[i] == NULL) {
       (pSyncNode->senders)[i] = snapshotSenderCreate(pSyncNode, i);
       sDebug(
-          "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu create new sender %p replicaIndex:%d, privateTerm:%lu",
+          "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu create new sender %p replicaIndex:%d, privateTerm:%lu",
           pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex,
           pSyncNode->pRaftStore->currentTerm, (pSyncNode->senders)[i], i, (pSyncNode->senders)[i]->privateTerm);
     }
@@ -1436,7 +1452,7 @@ void syncNodeUpdateConfig(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncIndex
   for (int i = 0; i < TSDB_MAX_REPLICA; ++i) {
     if (oldSenders[i] != NULL) {
       snapshotSenderDestroy(oldSenders[i]);
-      sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu delete old sender %p replicaIndex:%d",
+      sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu delete old sender %p replicaIndex:%d",
              pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex,
             pSyncNode->pRaftStore->currentTerm, oldSenders[i], i);
       oldSenders[i] = NULL;
@@ -1509,7 +1525,7 @@ void syncNodeUpdateTerm(SSyncNode* pSyncNode, SyncTerm term) {
 
 void syncNodeBecomeFollower(SSyncNode* pSyncNode, const char* debugStr) {
   sDebug(
-      "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu become follower, isStandBy:%d, replicaNum:%d, "
+      "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu become follower, isStandBy:%d, replicaNum:%d, "
       "restoreFinish:%d, %s",
      pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex,
pSyncNode->pRaftStore->currentTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->replicaNum, @@ -1551,7 +1567,7 @@ void syncNodeBecomeLeader(SSyncNode* pSyncNode, const char* debugStr) { pSyncNode->restoreFinish = false; sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu become leader, isStandBy:%d, replicaNum:%d, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu become leader, isStandBy:%d, replicaNum:%d, " "restoreFinish:%d, %s", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex, pSyncNode->pRaftStore->currentTerm, pSyncNode->pRaftCfg->isStandBy, pSyncNode->replicaNum, @@ -1857,7 +1873,7 @@ static void syncNodeEqPingTimer(void* param, void* tmrId) { if (pSyncNode->FpEqMsg != NULL) { int32_t code = pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg); if (code != 0) { - sError("vgId:%d sync enqueue ping msg error, code:%d", pSyncNode->vgId, code); + sError("vgId:%d, sync enqueue ping msg error, code:%d", pSyncNode->vgId, code); rpcFreeCont(rpcMsg.pCont); syncTimeoutDestroy(pSyncMsg); return; @@ -1891,7 +1907,7 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { if (pSyncNode->FpEqMsg != NULL) { int32_t code = pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg); if (code != 0) { - sError("vgId:%d sync enqueue elect msg error, code:%d", pSyncNode->vgId, code); + sError("vgId:%d, sync enqueue elect msg error, code:%d", pSyncNode->vgId, code); rpcFreeCont(rpcMsg.pCont); syncTimeoutDestroy(pSyncMsg); return; @@ -1929,7 +1945,7 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) { if (pSyncNode->FpEqMsg != NULL) { int32_t code = pSyncNode->FpEqMsg(pSyncNode->msgcb, &rpcMsg); if (code != 0) { - sError("vgId:%d sync enqueue timer msg error, code:%d", pSyncNode->vgId, code); + sError("vgId:%d, sync enqueue timer msg error, code:%d", pSyncNode->vgId, code); rpcFreeCont(rpcMsg.pCont); syncTimeoutDestroy(pSyncMsg); return; @@ -2067,8 +2083,9 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg) { if (ths->pFsm != NULL) { // if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_SYNC_NOOP) { if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) { - SFsmCbMeta cbMeta; + SFsmCbMeta cbMeta = {0}; cbMeta.index = pEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); cbMeta.isWeak = pEntry->isWeak; cbMeta.code = 0; cbMeta.state = ths->state; @@ -2089,8 +2106,9 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg) { if (ths->pFsm != NULL) { // if (ths->pFsm->FpPreCommitCb != NULL && pEntry->originalRpcType != TDMT_SYNC_NOOP) { if (ths->pFsm->FpPreCommitCb != NULL && syncUtilUserPreCommit(pEntry->originalRpcType)) { - SFsmCbMeta cbMeta; + SFsmCbMeta cbMeta = {0}; cbMeta.index = pEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); cbMeta.isWeak = pEntry->isWeak; cbMeta.code = 1; cbMeta.state = ths->state; @@ -2129,12 +2147,12 @@ const char* syncStr(ESyncState state) { static int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry) { SyncLeaderTransfer* pSyncLeaderTransfer = syncLeaderTransferFromRpcMsg2(pRpcMsg); - sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu begin leader transfer", ths->vgId, + sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu begin leader transfer", ths->vgId, syncUtilState2String(ths->state), ths->commitIndex, ths->pRaftStore->currentTerm); if (strcmp(pSyncLeaderTransfer->newNodeInfo.nodeFqdn, 
ths->myNodeInfo.nodeFqdn) == 0 && pSyncLeaderTransfer->newNodeInfo.nodePort == ths->myNodeInfo.nodePort) { - sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu maybe leader transfer to %s:%d %lu", ths->vgId, + sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu maybe leader transfer to %s:%d %lu", ths->vgId, syncUtilState2String(ths->state), ths->commitIndex, ths->pRaftStore->currentTerm, pSyncLeaderTransfer->newNodeInfo.nodeFqdn, pSyncLeaderTransfer->newNodeInfo.nodePort, pSyncLeaderTransfer->newLeaderId.addr); @@ -2154,11 +2172,12 @@ static int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftE } */ if (ths->pFsm->FpLeaderTransferCb != NULL) { - SFsmCbMeta cbMeta; + SFsmCbMeta cbMeta = {0}; cbMeta.code = 0; cbMeta.currentTerm = ths->pRaftStore->currentTerm; cbMeta.flag = 0; cbMeta.index = pEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); cbMeta.isWeak = pEntry->isWeak; cbMeta.seqNum = pEntry->seqNum; cbMeta.state = ths->state; @@ -2260,6 +2279,7 @@ static int32_t syncNodeConfigChange(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftE cbMeta.code = 0; cbMeta.currentTerm = ths->pRaftStore->currentTerm; cbMeta.index = pEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, pEntry->index); cbMeta.term = pEntry->term; cbMeta.newCfg = newSyncCfg; cbMeta.oldCfg = oldSyncCfg; @@ -2275,7 +2295,7 @@ static int32_t syncNodeConfigChange(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftE int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, uint64_t flag) { int32_t code = 0; ESyncState state = flag; - sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu commit by wal from index:%" PRId64 " to index:%" PRId64 + sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu commit by wal from index:%" PRId64 " to index:%" PRId64 ", %s", ths->vgId, syncUtilState2String(ths->state), ths->commitIndex, ths->pRaftStore->currentTerm, beginIndex, endIndex, syncUtilState2String(state)); @@ -2294,8 +2314,9 @@ int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, // user commit if (ths->pFsm->FpCommitCb != NULL && syncUtilUserCommit(pEntry->originalRpcType)) { - SFsmCbMeta cbMeta; + SFsmCbMeta cbMeta = {0}; cbMeta.index = pEntry->index; + cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(ths, cbMeta.index); cbMeta.isWeak = pEntry->isWeak; cbMeta.code = 0; cbMeta.state = ths->state; @@ -2309,6 +2330,8 @@ int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, // config change if (pEntry->originalRpcType == TDMT_SYNC_CONFIG_CHANGE) { + raftCfgAddConfigIndex(ths->pRaftCfg, pEntry->index); + raftCfgPersist(ths->pRaftCfg); code = syncNodeConfigChange(ths, &rpcMsg, pEntry); ASSERT(code == 0); } @@ -2327,7 +2350,7 @@ int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, ths->pFsm->FpRestoreFinishCb(ths->pFsm); } ths->restoreFinish = true; - sDebug("vgId:%d sync event %s commitIndex:%ld currentTerm:%lu restore finish, %s, index:%ld", ths->vgId, + sDebug("vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu restore finish, %s, index:%ld", ths->vgId, syncUtilState2String(ths->state), ths->commitIndex, ths->pRaftStore->currentTerm, syncUtilState2String(ths->state), pEntry->index); } diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c index 2d51f1f6f0..8831704d7c 100644 --- a/source/libs/sync/src/syncRaftCfg.c +++ b/source/libs/sync/src/syncRaftCfg.c @@ -66,6 +66,13 @@ int32_t 
raftCfgPersist(SRaftCfg *pRaftCfg) {
   return 0;
 }
 
+int32_t raftCfgAddConfigIndex(SRaftCfg *pRaftCfg, SyncIndex configIndex) {
+  ASSERT(pRaftCfg->configIndexCount < MAX_CONFIG_INDEX_COUNT);
+  (pRaftCfg->configIndexArr)[pRaftCfg->configIndexCount] = configIndex;
+  ++(pRaftCfg->configIndexCount);
+  return 0;
+}
+
 cJSON *syncCfg2Json(SSyncCfg *pSyncCfg) {
   char u64buf[128] = {0};
   cJSON *pRoot = cJSON_CreateObject();
diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c
index 87dfef5fdd..8fff7ae6de 100644
--- a/source/libs/sync/src/syncRaftLog.c
+++ b/source/libs/sync/src/syncRaftLog.c
@@ -164,7 +164,7 @@ static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntr
   walFsync(pWal, true);
 
   sDebug(
-      "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu write index:%ld, isStandBy:%d, msgType:%s,%d, "
+      "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu write index:%ld, isStandBy:%d, msgType:%s,%d, "
       "originalRpcType:%s,%d",
       pData->pSyncNode->vgId, syncUtilState2String(pData->pSyncNode->state), pData->pSyncNode->commitIndex,
       pData->pSyncNode->pRaftStore->currentTerm, pEntry->index, pData->pSyncNode->pRaftCfg->isStandBy,
@@ -325,7 +325,7 @@ int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) {
   walFsync(pWal, true);
 
   sDebug(
-      "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu old write index:%ld, isStandBy:%d, msgType:%s,%d, "
+      "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu old write index:%ld, isStandBy:%d, msgType:%s,%d, "
      "originalRpcType:%s,%d",
       pData->pSyncNode->vgId, syncUtilState2String(pData->pSyncNode->state), pData->pSyncNode->commitIndex,
       pData->pSyncNode->pRaftStore->currentTerm, pEntry->index, pData->pSyncNode->pRaftCfg->isStandBy,
diff --git a/source/libs/sync/src/syncRespMgr.c b/source/libs/sync/src/syncRespMgr.c
index 551c2611b0..831e286d76 100644
--- a/source/libs/sync/src/syncRespMgr.c
+++ b/source/libs/sync/src/syncRespMgr.c
@@ -47,7 +47,7 @@ int64_t syncRespMgrAdd(SSyncRespMgr *pObj, SRespStub *pStub) {
 
   SSyncNode *pSyncNode = pObj->data;
   sDebug(
-      "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu resp mgr add, msgType:%s,%d seq:%lu handle:%p ahandle:%p",
+      "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu resp mgr add, msgType:%s,%d seq:%lu handle:%p ahandle:%p",
       pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex,
       pSyncNode->pRaftStore->currentTerm, TMSG_INFO(pStub->rpcMsg.msgType), pStub->rpcMsg.msgType, keyCode,
       pStub->rpcMsg.info.handle, pStub->rpcMsg.info.ahandle);
@@ -74,7 +74,7 @@ int32_t syncRespMgrGet(SSyncRespMgr *pObj, uint64_t index, SRespStub *pStub) {
 
   SSyncNode *pSyncNode = pObj->data;
   sDebug(
-      "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu resp mgr get, msgType:%s,%d seq:%lu handle:%p "
+      "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu resp mgr get, msgType:%s,%d seq:%lu handle:%p "
       "ahandle:%p",
       pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex,
       pSyncNode->pRaftStore->currentTerm, TMSG_INFO(pStub->rpcMsg.msgType), pStub->rpcMsg.msgType, index,
@@ -96,7 +96,7 @@ int32_t syncRespMgrGetAndDel(SSyncRespMgr *pObj, uint64_t index, SRespStub *pStu
 
   SSyncNode *pSyncNode = pObj->data;
   sDebug(
-      "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu resp mgr get and del, msgType:%s,%d seq:%lu handle:%p "
+      "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu resp mgr get and del, msgType:%s,%d seq:%lu handle:%p "
       "ahandle:%p",
      pSyncNode->vgId, syncUtilState2String(pSyncNode->state),
pSyncNode->commitIndex, pSyncNode->pRaftStore->currentTerm, TMSG_INFO(pStub->rpcMsg.msgType), pStub->rpcMsg.msgType, index, diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c index 4973d36295..ba796c2aff 100644 --- a/source/libs/sync/src/syncSnapshot.c +++ b/source/libs/sync/src/syncSnapshot.c @@ -50,6 +50,7 @@ SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaI } else { sError("snapshotSenderCreate cannot create sender"); } + return pSender; } @@ -84,6 +85,10 @@ void snapshotSenderStart(SSyncSnapshotSender *pSender) { // get current snapshot info pSender->pSyncNode->pFsm->FpGetSnapshot(pSender->pSyncNode->pFsm, &(pSender->snapshot)); + + sTrace("snapshotSenderStart lastApplyIndex:%ld, lastApplyTerm:%lu, lastConfigIndex:%ld", + pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex); + if (pSender->snapshot.lastConfigIndex != SYNC_INDEX_INVALID) { /* SSyncRaftEntry *pEntry = NULL; @@ -141,7 +146,7 @@ void snapshotSenderStart(SSyncSnapshotSender *pSender) { if (gRaftDetailLog) { char *msgStr = syncSnapshotSend2Str(pMsg); sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d begin seq:%d ack:%d " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d begin seq:%d ack:%d " "lastApplyIndex:%ld " "lastApplyTerm:%lu " "lastConfigIndex:%ld privateTerm:%lu send " @@ -153,7 +158,7 @@ void snapshotSenderStart(SSyncSnapshotSender *pSender) { taosMemoryFree(msgStr); } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d begin seq:%d ack:%d " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d begin seq:%d ack:%d " "lastApplyIndex:%ld " "lastApplyTerm:%lu " "lastConfigIndex:%ld privateTerm:%lu", @@ -287,7 +292,7 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) { if (gRaftDetailLog) { char *msgStr = syncSnapshotSend2Str(pMsg); sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d finish seq:%d ack:%d " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d finish seq:%d ack:%d " "lastApplyIndex:%ld " "lastApplyTerm:%lu " "lastConfigIndex:%ld privateTerm:%lu send " @@ -299,7 +304,7 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) { taosMemoryFree(msgStr); } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d finish seq:%d ack:%d " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d finish seq:%d ack:%d " "lastApplyIndex:%ld " "lastApplyTerm:%lu " "lastConfigIndex:%ld privateTerm:%lu", @@ -310,7 +315,7 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) { } } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d sending seq:%d ack:%d " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d sending seq:%d ack:%d " "lastApplyIndex:%ld " "lastApplyTerm:%lu " "lastConfigIndex:%ld privateTerm:%lu", @@ -349,7 +354,7 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) { if (gRaftDetailLog) { char *msgStr = syncSnapshotSend2Str(pMsg); sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d resend seq:%d ack:%d " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d resend seq:%d ack:%d " "privateTerm:%lu send " "msg:%s", pSender->pSyncNode->vgId, syncUtilState2String(pSender->pSyncNode->state), 
pSender->pSyncNode->commitIndex, @@ -358,7 +363,7 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) { taosMemoryFree(msgStr); } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d resend seq:%d ack:%d " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot send to %s:%d resend seq:%d ack:%d " "privateTerm:%lu", pSender->pSyncNode->vgId, syncUtilState2String(pSender->pSyncNode->state), pSender->pSyncNode->commitIndex, pSender->pSyncNode->pRaftStore->currentTerm, host, port, pSender->seq, pSender->ack, pSender->privateTerm); @@ -421,7 +426,7 @@ cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) { char *snapshotSender2Str(SSyncSnapshotSender *pSender) { cJSON *pJson = snapshotSender2Json(pSender); - char * serialized = cJSON_Print(pJson); + char *serialized = cJSON_Print(pJson); cJSON_Delete(pJson); return serialized; } @@ -542,7 +547,7 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) { cJSON_AddStringToObject(pFromId, "addr", u64buf); { uint64_t u64 = pReceiver->fromId.addr; - cJSON * pTmp = pFromId; + cJSON *pTmp = pFromId; char host[128] = {0}; uint16_t port; syncUtilU642Addr(u64, host, sizeof(host), &port); @@ -566,7 +571,7 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) { char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) { cJSON *pJson = snapshotReceiver2Json(pReceiver); - char * serialized = cJSON_Print(pJson); + char *serialized = cJSON_Print(pJson); cJSON_Delete(pJson); return serialized; } @@ -594,7 +599,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { if (gRaftDetailLog) { char *msgStr = syncSnapshotSend2Str(pMsg); sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d begin ack:%d, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d begin ack:%d, " "lastIndex:%ld, " "lastTerm:%lu, " "lastConfigIndex:%ld, privateTerm:%lu, recv msg:%s", @@ -604,7 +609,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { taosMemoryFree(msgStr); } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d begin ack:%d, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d begin ack:%d, " "lastIndex:%ld, " "lastTerm:%lu, " "lastConfigIndex:%ld privateTerm:%lu", @@ -648,7 +653,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { bool isDrop; if (IamInNew) { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu update config by snapshot, lastIndex:%ld, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu update config by snapshot, lastIndex:%ld, " "lastTerm:%lu, " "lastConfigIndex:%ld ", pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex, @@ -656,7 +661,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { syncNodeUpdateConfig(pSyncNode, &newSyncCfg, pMsg->lastConfigIndex, &isDrop); } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu do not update config by snapshot, I am not in " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu do not update config by snapshot, I am not in " "newCfg, " "lastIndex:%ld, lastTerm:%lu, " "lastConfigIndex:%ld ", @@ -694,7 +699,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { if (gRaftDetailLog) { char *logSimpleStr = logStoreSimple2Str(pSyncNode->pLogStore); sDebug( - "vgId:%d sync 
event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d finish, update log begin " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d finish, update log begin " "index:%ld, " "snapshot.lastApplyIndex:%ld, " "snapshot.lastApplyTerm:%lu, snapshot.lastConfigIndex:%ld, privateTerm:%lu, raft log:%s", @@ -704,7 +709,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { taosMemoryFree(logSimpleStr); } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d finish, update log begin " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d finish, update log begin " "index:%ld, " "snapshot.lastApplyIndex:%ld, " "snapshot.lastApplyTerm:%lu, snapshot.lastConfigIndex:%ld, privateTerm:%lu", @@ -721,7 +726,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { if (gRaftDetailLog) { char *msgStr = syncSnapshotSend2Str(pMsg); sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d end ack:%d, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d end ack:%d, " "lastIndex:%ld, lastTerm:%lu, " "lastConfigIndex:%ld, privateTerm:%lu, recv msg:%s", pReceiver->pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex, @@ -730,7 +735,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { taosMemoryFree(msgStr); } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d end ack:%d, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d end ack:%d, " "lastIndex:%ld, lastTerm:%lu, " "lastConfigIndex:%ld, privateTerm:%lu", pReceiver->pSyncNode->vgId, syncUtilState2String(pSyncNode->state), pSyncNode->commitIndex, @@ -750,7 +755,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { if (gRaftDetailLog) { char *msgStr = syncSnapshotSend2Str(pMsg); sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d force close ack:%d, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d force close ack:%d, " "lastIndex:%ld, " "lastTerm:%lu, " "lastConfigIndex:%ld, privateTerm:%lu, recv " @@ -761,7 +766,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { taosMemoryFree(msgStr); } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d force close ack:%d, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d force close ack:%d, " "lastIndex:%ld, " "lastTerm:%lu, " "lastConfigIndex:%ld, privateTerm:%lu", @@ -787,7 +792,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { if (gRaftDetailLog) { char *msgStr = syncSnapshotSend2Str(pMsg); sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d receiving ack:%d, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d receiving ack:%d, " "lastIndex:%ld, " "lastTerm:%lu, " "lastConfigIndex:%ld, privateTerm:%lu, recv msg:%s", @@ -797,7 +802,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) { taosMemoryFree(msgStr); } else { sDebug( - "vgId:%d sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from %s:%d receiving ack:%d, " + "vgId:%d, sync event %s commitIndex:%ld currentTerm:%lu snapshot recv from 
%s:%d receiving ack:%d, " "lastIndex:%ld, " "lastTerm:%lu, " "lastConfigIndex:%ld, privateTerm:%lu", diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index a04e8b5fca..9191b60518 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -376,17 +376,19 @@ static void transDQTimeout(uv_timer_t* timer) { SDelayQueue* queue = timer->data; tTrace("timer %p timeout", timer); uint64_t timeout = 0; + int64_t current = taosGetTimestampMs(); do { HeapNode* minNode = heapMin(queue->heap); if (minNode == NULL) break; SDelayTask* task = container_of(minNode, SDelayTask, node); - if (task->execTime <= taosGetTimestampMs()) { + + if (task->execTime <= current) { heapRemove(queue->heap, minNode); task->func(task->arg); taosMemoryFree(task); timeout = 0; } else { - timeout = task->execTime - taosGetTimestampMs(); + timeout = task->execTime - current; break; } } while (1); diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c index 4a0d9e2866..4d61e7036d 100644 --- a/source/os/src/osSocket.c +++ b/source/os/src/osSocket.c @@ -946,8 +946,7 @@ int32_t taosGetFqdn(char *fqdn) { #endif // __APPLE__ int32_t ret = getaddrinfo(hostname, NULL, &hints, &result); if (!result) { - // printf("failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret)); - assert(0); + fprintf(stderr,"failed to get fqdn, code:%d, reason:%s", ret, gai_strerror(ret)); return -1; } diff --git a/source/util/src/tenv.c b/source/util/src/tenv.c index e717e82c5b..4fc0542816 100644 --- a/source/util/src/tenv.c +++ b/source/util/src/tenv.c @@ -72,6 +72,9 @@ int32_t taosEnvToCfg(const char *envStr, char *cfgStr) { if (cfgNameLen > 0) { memcpy(cfgStr, buf, cfgNameLen); memset(&cfgStr[cfgNameLen], ' ', p - cfgStr - cfgNameLen + 1); + } else { + *cfgStr = '\0'; + return -1; } } return strlen(cfgStr); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 933e3ba92b..7976cb09af 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -132,6 +132,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_INPUT, "Invalid tsc input") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_STMT_API_ERROR, "Stmt API usage error") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_STMT_TBNAME_ERROR, "Stmt table name not set") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_STMT_CLAUSE_ERROR, "not supported stmt clause") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_QUERY_KILLED, "Query killed") // mnode-common TAOS_DEFINE_ERROR(TSDB_CODE_MND_APP_ERROR, "Mnode internal error") @@ -455,6 +456,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_CTG_VG_META_MISMATCH, "table meta and vgroup TAOS_DEFINE_ERROR(TSDB_CODE_SCH_STATUS_ERROR, "scheduler status error") TAOS_DEFINE_ERROR(TSDB_CODE_SCH_INTERNAL_ERROR, "scheduler internal error") TAOS_DEFINE_ERROR(TSDB_CODE_SCH_TIMEOUT_ERROR, "Task timeout") +TAOS_DEFINE_ERROR(TSDB_CODE_SCH_JOB_IS_DROPPING, "Job is dropping") TAOS_DEFINE_ERROR(TSDB_CODE_QW_MSG_ERROR, "Invalid msg order") // parser @@ -577,9 +579,14 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSMA_NO_INDEX_IN_CACHE, "No tsma index in ca TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_ENV, "Invalid rsma env") TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_STAT, "Invalid rsma state") +//tq +TAOS_DEFINE_ERROR(TSDB_CODE_TQ_NO_COMMITTED_OFFSET, "No committed offset") + TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding") +TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message") + #ifdef TAOS_ERROR_C }; #endif diff --git a/tests/script/sh/copy_udf.bat b/tests/script/sh/copy_udf.bat new file mode 100644 index 0000000000..5144cf8d25 --- 
/dev/null +++ b/tests/script/sh/copy_udf.bat @@ -0,0 +1,23 @@ +@echo off + +echo Executing copy_udf.bat +set SCRIPT_DIR=%cd% +echo SCRIPT_DIR: %SCRIPT_DIR% + +cd ..\..\.. +set TAOS_DIR=%cd% +echo find udf library in %TAOS_DIR% +set UDF1_DIR=%TAOS_DIR%\debug\build\lib\udf1.dll +set UDF2_DIR=%TAOS_DIR%\debug\build\lib\udf2.dll + +echo %UDF1_DIR% +echo %UDF2_DIR% + +set UDF_TMP=C:\Windows\Temp\udf +rm -rf %UDF_TMP% +mkdir %UDF_TMP% + +echo Copy udf shared library files to %UDF_TMP% + +cp %UDF1_DIR% %UDF_TMP% +cp %UDF2_DIR% %UDF_TMP% diff --git a/tests/script/tsim/db/alter_option.sim b/tests/script/tsim/db/alter_option.sim index 4351ee5cb1..fede960a6a 100644 --- a/tests/script/tsim/db/alter_option.sim +++ b/tests/script/tsim/db/alter_option.sim @@ -92,7 +92,7 @@ endi if $data5_db != no_strict then # strict return -1 endi -if $data6_db != 345600 then # duration +if $data6_db != 345600m then # duration return -1 endi if $data7_db != 1440000m,1440000m,1440000m then # keep diff --git a/tests/script/tsim/db/basic6.sim b/tests/script/tsim/db/basic6.sim index 64103b5dac..7525fe2087 100644 --- a/tests/script/tsim/db/basic6.sim +++ b/tests/script/tsim/db/basic6.sim @@ -34,7 +34,7 @@ endi if $data24 != 1 then return -1 endi -if $data26 != 2880 then +if $data26 != 2880m then return -1 endi if $data27 != 14400m,14400m,14400m then @@ -78,7 +78,7 @@ endi if $data24 != 1 then return -1 endi -if $data26 != 21600 then +if $data26 != 21600m then return -1 endi diff --git a/tests/script/tsim/db/create_all_options.sim b/tests/script/tsim/db/create_all_options.sim index 284875ee08..efe7ff99cf 100644 --- a/tests/script/tsim/db/create_all_options.sim +++ b/tests/script/tsim/db/create_all_options.sim @@ -113,7 +113,7 @@ endi if $data5_db != no_strict then # strict return -1 endi -if $data6_db != 14400 then # duration +if $data6_db != 14400m then # duration return -1 endi if $data7_db != 5256000m,5256000m,5256000m then # keep diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim index 93cae4e391..d9821a0495 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/query/udf.sim @@ -19,8 +19,14 @@ sql show databases; sql create table t (ts timestamp, f int); sql insert into t values(now, 1)(now+1s, 2); -sql create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8; -sql create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8; +system_content printf %OS% +if $system_content == Windows_NT then + sql create function udf1 as 'C:\\Windows\\Temp\\udf1.dll' outputtype int bufSize 8; + sql create aggregate function udf2 as 'C:\\Windows\\Temp\\udf2.dll' outputtype double bufSize 8; +else + sql create function udf1 as '/tmp/udf/libudf1.so' outputtype int bufSize 8; + sql create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8; +endi sql show functions; if $rows != 2 then return -1 diff --git a/tests/script/tsim/show/basic.sim b/tests/script/tsim/show/basic.sim index 95201bc48e..f23d75d78b 100644 --- a/tests/script/tsim/show/basic.sim +++ b/tests/script/tsim/show/basic.sim @@ -98,7 +98,7 @@ if $rows != 1 then endi #sql select * from information_schema.`streams` sql select * from information_schema.user_tables -if $rows != 28 then +if $rows != 29 then return -1 endi #sql select * from information_schema.user_table_distributed @@ -196,7 +196,7 @@ if $rows != 1 then endi #sql select * from performance_schema.`streams` sql select * from information_schema.user_tables -if $rows != 28 then +if $rows != 29 then return -1 endi #sql 
select * from information_schema.user_table_distributed @@ -210,4 +210,4 @@ if $rows != 3 then endi system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode2 -s stop -x SIGINT diff --git a/tests/system-test/1-insert/alter_stable.py b/tests/system-test/1-insert/alter_stable.py index c92efb403c..cd64e3ddfe 100644 --- a/tests/system-test/1-insert/alter_stable.py +++ b/tests/system-test/1-insert/alter_stable.py @@ -77,6 +77,7 @@ class TDTestCase: tdSql.error(f'alter stable {stbname} modify column c9 double') tdSql.error(f'alter stable {stbname} modify column c10 float') tdSql.error(f'alter stable {stbname} modify column c11 int') + tdSql.error(f'alter stable {stbname} drop tag t0') tdSql.execute(f'drop database {dbname}') def alter_stable_tag_check(self,dbname,stbname,tbname): @@ -126,6 +127,11 @@ class TDTestCase: for i in ['int','unsigned int','float','binary(10)','nchar(10)']: tdSql.error(f'alter stable {stbname} modify tag t8 {i}') tdSql.error(f'alter stable {stbname} modify tag t4 int') + tdSql.error(f'alter stable {stbname} drop column t0') + #!bug TD-16410 + # tdSql.error(f'alter stable {tbname} set tag t1=100 ') + # tdSql.execute(f'create table ntb (ts timestamp,c0 int)') + tdSql.error(f'alter stable ntb add column c2 ') tdSql.execute(f'drop database {dbname}') def run(self): diff --git a/tests/system-test/1-insert/alter_table.py b/tests/system-test/1-insert/alter_table.py index 3c0def86e4..a4e40d1b0b 100644 --- a/tests/system-test/1-insert/alter_table.py +++ b/tests/system-test/1-insert/alter_table.py @@ -52,8 +52,6 @@ class TDTestCase: tdSql.execute(f'create database if not exists {dbname}') stbname = self.get_long_name(length=3, mode="letters") tbname = self.get_long_name(length=3, mode="letters") - tdLog.info('--------------------------child table tag check--------------------------------------') - tdLog.info(f'-----------------create stable {stbname} and child table {tbname}-------------------') tdSql.execute(f'create stable if not exists {dbname}.{stbname} (col_ts timestamp, c1 int) tags (tag_ts timestamp, t1 tinyint, t2 smallint, t3 int, \ t4 bigint, t5 tinyint unsigned, t6 smallint unsigned, t7 int unsigned, t8 bigint unsigned, t9 float, t10 double, t11 bool,t12 binary(20),t13 nchar(20))') tdSql.execute(f'create table if not exists {dbname}.{tbname} using {dbname}.{stbname} tags(now, 1, 2, 3, 4, 5, 6, 7, 8, 9.9, 10.1, True,"abc123","涛思数据")') @@ -90,13 +88,16 @@ class TDTestCase: tdSql.checkData(0,15,tag_nchar) # bug TD-16211 insert length more than setting binary and nchar - # tag_binary = self.get_long_name(length=21, mode="letters") - # tag_nchar = self.get_long_name(length=21, mode="letters") - # tdSql.error(f'alter table {dbname}.{tbname} set tag t12 = "{tag_binary}"') - # tdSql.error(f'alter table {dbname}.{tbname} set tag t13 = "{tag_nchar}"') - + # error_tag_binary = self.get_long_name(length=21, mode="letters") + # error_tag_nchar = self.get_long_name(length=21, mode="letters") + # tdSql.error(f'alter table {dbname}.{tbname} set tag t12 = "{error_tag_binary}"') + # tdSql.error(f'alter table {dbname}.{tbname} set tag t13 = "{error_tag_nchar}"') + error_tag_binary = self.get_long_name(length=25, mode="letters") + error_tag_nchar = self.get_long_name(length=25, mode="letters") + tdSql.error(f'alter table {dbname}.{tbname} set tag t12 = "{error_tag_binary}"') + tdSql.error(f'alter table {dbname}.{tbname} set tag t13 = "{error_tag_nchar}"') # bug TD-16210 modify binary to nchar - # 
tdSql.error(f'alter table {dbname}.{tbname} modify tag t12 nchar(10)') + tdSql.error(f'alter table {dbname}.{tbname} modify tag t12 nchar(10)') tdSql.execute(f"drop database {dbname}") def alter_ntb_column_check(self): ''' @@ -125,6 +126,20 @@ class TDTestCase: tdSql.execute(f'alter table {dbname}.{tbname} drop column `c15`') tdSql.query(f'describe {dbname}.{tbname}') tdSql.checkRows(14) + #! TD-16422 + # tdSql.execute(f'alter table {dbname}.{tbname} add column c16 binary(10)') + # tdSql.query(f'describe {dbname}.{tbname}') + # tdSql.checkRows(15) + # tdSql.checkEqual(tdSql.queryResult[14][2],10) + # tdSql.execute(f'alter table {dbname}.{tbname} drop column c16') + + # tdSql.execute(f'alter table {dbname}.{tbname} add column c16 nchar(10)') + # tdSql.query(f'describe {dbname}.{tbname}') + # tdSql.checkRows(15) + # tdSql.checkEqual(tdSql.queryResult[14][2],10) + # tdSql.execute(f'alter table {dbname}.{tbname} drop column c16') + + tdSql.execute(f'alter table {dbname}.{tbname} modify column c12 binary(30)') tdSql.query(f'describe {dbname}.{tbname}') tdSql.checkData(12,2,30) diff --git a/tests/system-test/1-insert/insertWithMoreVgroup.py b/tests/system-test/1-insert/insertWithMoreVgroup.py index 7708ebb476..29c293c608 100644 --- a/tests/system-test/1-insert/insertWithMoreVgroup.py +++ b/tests/system-test/1-insert/insertWithMoreVgroup.py @@ -119,7 +119,7 @@ class TDTestCase: # tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate)) return - def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,childrowcount): + def mutiThread_create_tables(self,host,dbname,stbname,vgroups,threadNumbers,childcount): buildPath = self.getBuildPath() config = buildPath+ "../sim/dnode1/cfg/" @@ -128,7 +128,7 @@ class TDTestCase: tsql.execute("drop database if exists %s"%dbname) tsql.execute("create database %s vgroups %d"%(dbname,vgroups)) tsql.execute("use %s" %dbname) - count=int(childrowcount) + count=int(childcount) threads = [] for i in range(threadNumbers): tsql.execute("create stable %s%d(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%(stbname,i)) @@ -264,19 +264,88 @@ class TDTestCase: speedCreate=count/spendTime tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... 
[OK]"% (spendTime,count,speedCreate)) return + + def checkData(self,dbname,stbname,stableCount,CtableCount,rowsPerSTable,): + tdSql.execute("use %s"%dbname) + tdSql.query("show stables") + tdSql.checkRows(stableCount) + tdSql.query("show tables") + tdSql.checkRows(CtableCount) + for i in range(stableCount): + tdSql.query("select count(*) from %s%d"%(stbname,i)) + tdSql.checkData(0,0,rowsPerSTable) + return + + # test case1 base def test_case1(self): + #stableCount=threadNumbersCtb + parameterDict = {'vgroups': 1, \ + 'threadNumbersCtb': 5, \ + 'threadNumbersIda': 5, \ + 'stableCount': 5, \ + 'tablesPerStb': 50, \ + 'rowsPerTable': 10, \ + 'dbname': 'db', \ + 'stbname': 'stb', \ + 'host': 'localhost', \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + tdLog.debug("-----create database and muti-thread create tables test------- ") #host,dbname,stbname,vgroups,threadNumbers,tcountStart,tcountStop #host, dbname, stbname, threadNumbers, chilCount, ts_start, childrowcount - self.mutiThread_create_tables(host="localhost",dbname="db",stbname="stb", vgroups=1, threadNumbers=5, childrowcount=50) - self.mutiThread_insert_data(host="localhost",dbname="db",stbname="stb", threadNumbers=5,chilCount=50,ts_start=self.ts,childrowcount=10) + self.mutiThread_create_tables( + host=parameterDict['host'], + dbname=parameterDict['dbname'], + stbname=parameterDict['stbname'], + vgroups=parameterDict['vgroups'], + threadNumbers=parameterDict['threadNumbersCtb'], + childcount=parameterDict['tablesPerStb']) - return + self.mutiThread_insert_data( + host=parameterDict['host'], + dbname=parameterDict['dbname'], + stbname=parameterDict['stbname'], + threadNumbers=parameterDict['threadNumbersIda'], + chilCount=parameterDict['tablesPerStb'], + ts_start=parameterDict['startTs'], + childrowcount=parameterDict['rowsPerTable']) + tableCount=parameterDict['threadNumbersCtb']*parameterDict['tablesPerStb'] + rowsPerStable=parameterDict['rowsPerTable']*parameterDict['tablesPerStb'] + self.checkData(dbname=parameterDict['dbname'],stbname=parameterDict['stbname'], stableCount=parameterDict['threadNumbersCtb'],CtableCount=tableCount,rowsPerSTable=rowsPerStable) + def test_case3(self): - self.taosBenchCreate("127.0.0.1","no","db1", "stb1", 1, 8, 1*10) + #stableCount=threadNumbersCtb + parameterDict = {'vgroups': 1, \ + 'threadNumbersCtb': 8, \ + 'stableCount': 5, \ + 'tablesPerStb': 10, \ + 'rowsPerTable': 100, \ + 'dbname': 'db1', \ + 'stbname': 'stb1', \ + 'host': 'localhost', \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + + self.taosBenchCreate( + parameterDict['host'], + "no", + parameterDict['dbname'], + parameterDict['stbname'], + parameterDict['vgroups'], + parameterDict['threadNumbersCtb'], + parameterDict['tablesPerStb']) + tableCount=parameterDict['threadNumbersCtb']*parameterDict['tablesPerStb'] + rowsPerStable=parameterDict['rowsPerTable']*parameterDict['tablesPerStb'] + + self.checkData( + dbname=parameterDict['dbname'], + stbname=parameterDict['stbname'], + stableCount=parameterDict['threadNumbersCtb'], + CtableCount=tableCount, + rowsPerSTable=rowsPerStable) + # self.taosBenchCreate("test209","no","db2", "stb2", 1, 8, 1*10000) # self.taosBenchCreate("chenhaoran02","no","db1", "stb1", 1, 8, 1*10000) @@ -320,14 +389,6 @@ class TDTestCase: # tdSql.execute("create qnode on dnode %s"%dnodeId) - - # self.taosBenchCreate("test209","no","db2", "stb2", 1, 8, 1*10000) - - # self.taosBenchCreate("chenhaoran02","no","db1", "stb1", 1, 8, 1*10000) - - # self.taosBenchCreate("db1", "stb1", 4, 5, 100*10000) - # 
self.taosBenchCreate("db1", "stb1", 1, 5, 100*10000) - # run case def run(self): diff --git a/tests/system-test/1-insert/manyVgroups.json b/tests/system-test/1-insert/manyVgroups.json index 5dea41476c..20ac320552 100644 --- a/tests/system-test/1-insert/manyVgroups.json +++ b/tests/system-test/1-insert/manyVgroups.json @@ -11,7 +11,7 @@ "confirm_parameter_prompt": "no", "insert_interval": 0, "interlace_rows": 0, - "num_of_records_per_req": 100, + "num_of_records_per_req": 100000, "databases": [ { "dbinfo": { @@ -29,7 +29,7 @@ "batch_create_tbl_num": 50000, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 1, + "insert_rows": 100, "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 10000000, @@ -45,28 +45,28 @@ }, { "type": "DOUBLE", - "count": 100 + "count": 1 }, { "type": "BINARY", - "len": 400, - "count": 10 + "len": 40, + "count": 1 }, { "type": "nchar", - "len": 200, - "count": 20 + "len": 20, + "count": 1 } ], "tags": [ { "type": "TINYINT", - "count": 2 + "count": 1 }, { "type": "BINARY", "len": 16, - "count": 2 + "count": 1 } ] } diff --git a/tests/system-test/2-query/bottom.py b/tests/system-test/2-query/bottom.py index 008f59aa6a..1037b0a8f3 100644 --- a/tests/system-test/2-query/bottom.py +++ b/tests/system-test/2-query/bottom.py @@ -11,9 +11,12 @@ # -*- coding: utf-8 -*- +import random +import string from util.log import * from util.cases import * from util.sql import * +from util.common import * @@ -23,79 +26,89 @@ class TDTestCase: tdSql.init(conn.cursor()) self.rowNum = 10 + self.tbnum = 20 self.ts = 1537146000000 - - def run(self): + self.binary_str = 'taosdata' + self.nchar_str = '涛思数据' + def bottom_check_base(self): tdSql.prepare() - - tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') - tdSql.execute("create table test1 using test tags('beijing')") + tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') + tdSql.execute("create table stb_1 using stb tags('beijing')") + column_list = ['col1','col2','col3','col4','col5','col6','col7','col8'] + error_column_list = ['col11','col12','col13'] + error_param_list = [0,101] for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - - # bottom verifacation - tdSql.error("select bottom(ts, 10) from test") - tdSql.error("select bottom(col1, 0) from test") - tdSql.error("select bottom(col1, 101) from test") - tdSql.error("select bottom(col2, 0) from test") - tdSql.error("select bottom(col2, 101) from test") - tdSql.error("select bottom(col3, 0) from test") - tdSql.error("select bottom(col3, 101) from test") - tdSql.error("select bottom(col4, 0) from test") - tdSql.error("select bottom(col4, 101) from test") - tdSql.error("select bottom(col5, 0) from test") - tdSql.error("select bottom(col5, 101) from test") - tdSql.error("select bottom(col6, 0) from test") - tdSql.error("select bottom(col6, 101) from test") - tdSql.error("select bottom(col7, 10) from test") - 
tdSql.error("select bottom(col8, 10) from test") - tdSql.error("select bottom(col9, 10) from test") - - tdSql.query("select bottom(col1, 2) from test") + tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + + for i in column_list: + tdSql.query(f'select bottom({i},2) from stb_1') + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) + for j in error_param_list: + tdSql.error(f'select bottom({i},{j}) from stb_1') + for i in error_column_list: + tdSql.error(f'select bottom({i},10) from stb_1') + tdSql.query("select ts,bottom(col1, 2),ts from stb_1 group by tbname") tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) - tdSql.query("select bottom(col2, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) - - tdSql.query("select bottom(col3, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) - - tdSql.query("select bottom(col4, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) - - tdSql.query("select bottom(col11, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) - - tdSql.query("select bottom(col12, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) - - tdSql.query("select bottom(col13, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) - - tdSql.query("select bottom(col13,50) from test") - tdSql.checkRows(10) - - tdSql.query("select bottom(col14, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(2,),(1,)]) - tdSql.query("select ts,bottom(col1, 2) from test1") - tdSql.checkRows(2) - tdSql.query("select ts,bottom(col1, 2),ts from test group by tbname") - tdSql.checkRows(2) - - tdSql.query('select bottom(col2,1) from test interval(1y) order by col2') + tdSql.query('select bottom(col2,1) from stb_1 interval(1y) order by col2') tdSql.checkData(0,0,1) + tdSql.error('select * from stb_1 where bottom(col2,1)=1') + tdSql.execute('drop database db') + def bottom_check_distribute(self): + # prepare data for vgroup 4 + dbname = tdCom.getLongName(5, "letters") + stbname = tdCom.getLongName(5, "letters") + vgroup_num = 2 + child_table_num = 20 + tdSql.execute(f"create database if not exists {dbname} vgroups {vgroup_num}") + tdSql.execute(f'use {dbname}') + # build 20 child tables,every table insert 10 rows + tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') + for i in range(child_table_num): + tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')") + tdSql.execute(f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i)) + column_list = ['col1','col2','col3','col4','col5','col6','col7','col8'] + error_column_list = ['col11','col12','col13'] + error_param_list = [0,101] + for i in [f'{stbname}', f'{dbname}.{stbname}']: + for j in column_list: + tdSql.query(f"select bottom({j},1) from {i}") + tdSql.checkRows(0) + tdSql.query('show tables') + vgroup_list = [] + for i in range(len(tdSql.queryResult)): + vgroup_list.append(tdSql.queryResult[i][6]) + vgroup_list_set = set(vgroup_list) - 
tdSql.error('select * from test where bottom(col2,1)=1') + for i in vgroup_list_set: + vgroups_num = vgroup_list.count(i) + if vgroups_num >=2: + tdLog.info(f'This scene with {vgroups_num} vgroups is ok!') + continue + else: + tdLog.exit(f'This scene does not meet the requirements with {vgroups_num} vgroup!\n') + for i in range(self.rowNum): + for j in range(child_table_num): + tdSql.execute(f"insert into {stbname}_{j} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + for i in column_list: + tdSql.query(f'select bottom({i},2) from {stbname}') + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(1,),(1,)]) + for j in error_param_list: + tdSql.error(f'select bottom({i},{j}) from {stbname}') + for i in error_column_list: + tdSql.error(f'select bottom({i},10) from {stbname}') + + tdSql.execute(f'drop database {dbname}') + def run(self): + + self.bottom_check_base() + self.bottom_check_distribute() def stop(self): diff --git a/tests/system-test/2-query/distribute_agg_apercentile.py b/tests/system-test/2-query/distribute_agg_apercentile.py new file mode 100644 index 0000000000..fd1455ce16 --- /dev/null +++ b/tests/system-test/2-query/distribute_agg_apercentile.py @@ -0,0 +1,198 @@ +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import random + + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143, + "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.vnode_disbutes = None + self.ts = 1537146000000 + + def prepare_datas_of_distribute(self): + + # prepate datas for 20 tables distributed at different vgroups + tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") + tdSql.execute(" use testdb ") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(20): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + + for i in range(1,21): + if i ==1 or i == 4: + continue + else: + tbname = "ct"+f'{i}' + for j in range(9): + tdSql.execute( + f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, 
{11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + tdLog.info(" prepare data for distributed_aggregate done! 
") + + def check_distribute_datas(self): + # get vgroup_ids of all + tdSql.query("show vgroups ") + vgroups = tdSql.queryResult + + vnode_tables={} + + for vgroup_id in vgroups: + vnode_tables[vgroup_id[0]]=[] + + + # check sub_table of per vnode ,make sure sub_table has been distributed + tdSql.query("show tables like 'ct%'") + table_names = tdSql.queryResult + tablenames = [] + for table_name in table_names: + vnode_tables[table_name[6]].append(table_name[0]) + self.vnode_disbutes = vnode_tables + + count = 0 + for k ,v in vnode_tables.items(): + if len(v)>=2: + count+=1 + if count < 2: + tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + + def distribute_agg_query(self): + # basic filter + tdSql.query("select apercentile(c1 , 20) from stb1 where c1 is null") + tdSql.checkRows(0) + + tdSql.query("select apercentile(c1 , 20) from stb1 where t1=1") + tdSql.checkData(0,0,2.800000000) + + tdSql.query("select apercentile(c1+c2 ,100) from stb1 where c1 =1 ") + tdSql.checkData(0,0,11112.000000000) + + tdSql.query("select apercentile(c1 ,10 ) from stb1 where tbname=\"ct2\"") + tdSql.checkData(0,0,2.000000000) + + tdSql.query("select apercentile(c1,20) from stb1 partition by tbname") + tdSql.checkRows(20) + + tdSql.query("select apercentile(c1,20) from stb1 where t1> 4 partition by tbname") + tdSql.checkRows(15) + + # union all + tdSql.query("select apercentile(c1,20) from stb1 union all select apercentile(c1,20) from stb1 ") + tdSql.checkRows(2) + tdSql.checkData(0,0,7.389181281) + + # join + + tdSql.execute(" create database if not exists db ") + tdSql.execute(" use db ") + tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(" create table tb1 using st tags(1) ") + tdSql.execute(" create table tb2 using st tags(2) ") + + + for i in range(10): + ts = i*10 + self.ts + tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + + tdSql.query("select apercentile(tb1.c1,100), apercentile(tb2.c2,100) from tb1, tb2 where tb1.ts=tb2.ts") + tdSql.checkRows(1) + tdSql.checkData(0,0,9.000000000) + tdSql.checkData(0,0,9.000000000) + + # group by + tdSql.execute(" use testdb ") + tdSql.query(" select max(c1),c1 from stb1 group by t1 ") + tdSql.checkRows(20) + tdSql.query(" select max(c1),c1 from stb1 group by c1 ") + tdSql.checkRows(30) + tdSql.query(" select max(c1),c2 from stb1 group by c2 ") + tdSql.checkRows(31) + + # partition by tbname or partition by tag + tdSql.query("select apercentile(c1 ,10)from stb1 partition by tbname") + query_data = tdSql.queryResult + + # nest query for support max + tdSql.query("select apercentile(c2+2,10)+1 from (select max(c1) c2 from stb1)") + tdSql.checkData(0,0,31.000000000) + tdSql.query("select apercentile(c1+2,10)+1 as c2 from (select ts ,c1 ,c2 from stb1)") + tdSql.checkData(0,0,7.560701700) + tdSql.query("select apercentile(a+2,10)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)") + tdSql.checkData(0,0,7.560701700) + + # mixup with other functions + tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1), apercentile(c1,10) from stb1") + tdSql.checkData(0,0,28) + tdSql.checkData(0,1,184) + tdSql.checkData(0,2,-99999) + tdSql.checkData(0,3,-999) + tdSql.checkData(0,4,28.000000000) + tdSql.checkData(0,5,4.560701700) + + def run(self): + + self.prepare_datas_of_distribute() + self.check_distribute_datas() + self.distribute_agg_query() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + 
+tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/distribute_agg_count.py b/tests/system-test/2-query/distribute_agg_count.py new file mode 100644 index 0000000000..2ac9c86df0 --- /dev/null +++ b/tests/system-test/2-query/distribute_agg_count.py @@ -0,0 +1,296 @@ +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import random + + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143, + "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.vnode_disbutes = None + self.ts = 1537146000000 + + + def check_count_functions(self, tbname , col_name): + + max_sql = f"select count({col_name}) from {tbname};" + + same_sql = f"select sum(c) from (select {col_name} ,1 as c from {tbname} where {col_name} is not null) " + + tdSql.query(max_sql) + max_result = tdSql.queryResult + + tdSql.query(same_sql) + same_result = tdSql.queryResult + + if max_result !=same_result: + tdLog.exit(" count function work not as expected, sql : %s "% max_sql) + else: + tdLog.info(" count function work as expected, sql : %s "% max_sql) + + + def prepare_datas_of_distribute(self): + + # prepate datas for 20 tables distributed at different vgroups + tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") + tdSql.execute(" use testdb ") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(20): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + + for i in range(1,21): + if i ==1 or i == 4: + continue + else: + tbname = "ct"+f'{i}' + for j in range(9): + tdSql.execute( + f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert 
into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + tdLog.info(" prepare data for distributed_aggregate done! 
") + + def check_distribute_datas(self): + # get vgroup_ids of all + tdSql.query("show vgroups ") + vgroups = tdSql.queryResult + + vnode_tables={} + + for vgroup_id in vgroups: + vnode_tables[vgroup_id[0]]=[] + + + # check sub_table of per vnode ,make sure sub_table has been distributed + tdSql.query("show tables like 'ct%'") + table_names = tdSql.queryResult + tablenames = [] + for table_name in table_names: + vnode_tables[table_name[6]].append(table_name[0]) + self.vnode_disbutes = vnode_tables + + count = 0 + for k ,v in vnode_tables.items(): + if len(v)>=2: + count+=1 + if count < 2: + tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + + def check_count_distribute_diff_vnode(self,col_name): + + vgroup_ids = [] + for k ,v in self.vnode_disbutes.items(): + if len(v)>=2: + vgroup_ids.append(k) + + distribute_tbnames = [] + + for vgroup_id in vgroup_ids: + vnode_tables = self.vnode_disbutes[vgroup_id] + distribute_tbnames.append(random.sample(vnode_tables,1)[0]) + tbname_ins = "" + for tbname in distribute_tbnames: + tbname_ins += "'%s' ,"%tbname + + tbname_filters = tbname_ins[:-1] + + max_sql = f"select count({col_name}) from stb1 where tbname in ({tbname_filters});" + + same_sql = f"select sum(c) from (select {col_name} ,1 as c from stb1 where tbname in ({tbname_filters}) and {col_name} is not null) " + + tdSql.query(max_sql) + max_result = tdSql.queryResult + + tdSql.query(same_sql) + same_result = tdSql.queryResult + + if max_result !=same_result: + tdLog.exit(" count function work not as expected, sql : %s "% max_sql) + else: + tdLog.info(" count function work as expected, sql : %s "% max_sql) + + def check_count_status(self): + # check max function work status + + tdSql.query("show tables like 'ct%'") + table_names = tdSql.queryResult + tablenames = [] + for table_name in table_names: + tablenames.append(table_name[0]) + + tdSql.query("desc stb1") + col_names = tdSql.queryResult + + colnames = [] + for col_name in col_names: + if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]: + colnames.append(col_name[0]) + + for tablename in tablenames: + for colname in colnames: + self.check_count_functions(tablename,colname) + + # check max function for different vnode + + for colname in colnames: + if colname.startswith("c"): + self.check_count_distribute_diff_vnode(colname) + else: + # self.check_count_distribute_diff_vnode(colname) # bug for tag + pass + + + def distribute_agg_query(self): + # basic filter + tdSql.query("select count(c1) from stb1 ") + tdSql.checkData(0,0,184) + + tdSql.query("select count(c1) from stb1 where t1=1") + tdSql.checkData(0,0,9) + + tdSql.query("select count(c1+c2) from stb1 where c1 =1 ") + tdSql.checkData(0,0,2) + + tdSql.query("select count(c1) from stb1 where tbname=\"ct2\"") + tdSql.checkData(0,0,9) + + tdSql.query("select count(c1) from stb1 partition by tbname") + tdSql.checkRows(20) + + tdSql.query("select count(c1) from stb1 where t1> 4 partition by tbname") + tdSql.checkRows(15) + + # union all + tdSql.query("select count(c1) from stb1 union all select count(c1) from stb1 ") + tdSql.checkRows(2) + tdSql.checkData(0,0,184) + + # join + + tdSql.execute(" create database if not exists db ") + tdSql.execute(" use db ") + tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(" create table tb1 using st tags(1) ") + tdSql.execute(" create table tb2 using st tags(2) ") + + + for i in range(10): + ts = i*10 + self.ts + tdSql.execute(f" insert into tb1 
values({ts},{i},{i}.0)") + tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + + tdSql.query("select count(tb1.c1), count(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts") + tdSql.checkRows(1) + tdSql.checkData(0,0,10) + tdSql.checkData(0,1,10) + + # group by + tdSql.execute(" use testdb ") + + tdSql.query(" select count(*) from stb1 ") + tdSql.checkData(0,0,187) + tdSql.query(" select count(*) from stb1 group by t1 ") + tdSql.checkRows(20) + tdSql.query(" select count(*) from stb1 group by c1 ") + tdSql.checkRows(30) + tdSql.query(" select count(*) from stb1 group by c2 ") + tdSql.checkRows(31) + + # partition by tbname or partition by tag + tdSql.query("select max(c1),tbname from stb1 partition by tbname") + query_data = tdSql.queryResult + + for row in query_data: + tbname = row[1] + tdSql.query(" select max(c1) from %s "%tbname) + tdSql.checkData(0,0,row[0]) + + tdSql.query("select max(c1),tbname from stb1 partition by t1") + query_data = tdSql.queryResult + + for row in query_data: + tbname = row[1] + tdSql.query(" select max(c1) from %s "%tbname) + tdSql.checkData(0,0,row[0]) + + # nest query for support max + tdSql.query("select abs(c2+2)+1 from (select count(c1) c2 from stb1)") + tdSql.checkData(0,0,187.000000000) + tdSql.query("select count(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)") + tdSql.checkData(0,0,184) + tdSql.query("select count(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)") + tdSql.checkData(0,0,184) + + # mixup with other functions + tdSql.query("select max(c1),count(c1),last(c2,c3) from stb1") + tdSql.checkData(0,0,28) + tdSql.checkData(0,1,184) + tdSql.checkData(0,2,-99999) + tdSql.checkData(0,3,-999) + + def run(self): + + self.prepare_datas_of_distribute() + self.check_distribute_datas() + self.check_count_status() + self.distribute_agg_query() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/distribute_agg_max.py b/tests/system-test/2-query/distribute_agg_max.py new file mode 100644 index 0000000000..ae0ab5aafa --- /dev/null +++ b/tests/system-test/2-query/distribute_agg_max.py @@ -0,0 +1,293 @@ +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import random + + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143, + "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.vnode_disbutes = None + self.ts = 1537146000000 + + + def check_max_functions(self, tbname , col_name): + + max_sql = f"select max({col_name}) from {tbname};" + + same_sql = f"select {col_name} from {tbname} order by {col_name} desc limit 1" + + tdSql.query(max_sql) + max_result = tdSql.queryResult + + tdSql.query(same_sql) + same_result = tdSql.queryResult + + if max_result !=same_result: + tdLog.exit(" max function work not as expected, sql : %s "% max_sql) + else: + tdLog.info(" max function work as expected, sql : %s "% max_sql) + + + def prepare_datas_of_distribute(self): + + # prepate datas for 20 tables distributed at 
different vgroups + tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") + tdSql.execute(" use testdb ") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(20): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + + for i in range(1,21): + if i ==1 or i == 4: + continue + else: + tbname = "ct"+f'{i}' + for j in range(9): + tdSql.execute( + f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 
01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + tdLog.info(" prepare data for distributed_aggregate done! ") + + def check_distribute_datas(self): + # get vgroup_ids of all + tdSql.query("show vgroups ") + vgroups = tdSql.queryResult + + vnode_tables={} + + for vgroup_id in vgroups: + vnode_tables[vgroup_id[0]]=[] + + + # check sub_table of per vnode ,make sure sub_table has been distributed + tdSql.query("show tables like 'ct%'") + table_names = tdSql.queryResult + tablenames = [] + for table_name in table_names: + vnode_tables[table_name[6]].append(table_name[0]) + self.vnode_disbutes = vnode_tables + + count = 0 + for k ,v in vnode_tables.items(): + if len(v)>=2: + count+=1 + if count < 2: + tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + + def check_max_distribute_diff_vnode(self,col_name): + + vgroup_ids = [] + for k ,v in self.vnode_disbutes.items(): + if len(v)>=2: + vgroup_ids.append(k) + + distribute_tbnames = [] + + for vgroup_id in vgroup_ids: + vnode_tables = self.vnode_disbutes[vgroup_id] + distribute_tbnames.append(random.sample(vnode_tables,1)[0]) + tbname_ins = "" + for tbname in distribute_tbnames: + tbname_ins += "'%s' ,"%tbname + + tbname_filters = tbname_ins[:-1] + + max_sql = f"select max({col_name}) from stb1 where tbname in ({tbname_filters});" + + same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) order by {col_name} desc limit 1" + + tdSql.query(max_sql) + max_result = tdSql.queryResult + + tdSql.query(same_sql) + same_result = tdSql.queryResult + + if max_result !=same_result: + tdLog.exit(" max function work not as expected, sql : %s "% max_sql) + else: + tdLog.info(" max function work as expected, sql : %s "% max_sql) + + def check_max_status(self): + # check max function work status + + tdSql.query("show tables like 'ct%'") + table_names = tdSql.queryResult + tablenames = [] + for table_name in table_names: + tablenames.append(table_name[0]) + + tdSql.query("desc stb1") + col_names = tdSql.queryResult + + colnames = [] + for col_name in col_names: + if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]: + colnames.append(col_name[0]) + + for tablename in tablenames: + for colname in colnames: + self.check_max_functions(tablename,colname) + + # check max function for different vnode + + for colname in colnames: + if colname.startswith("c"): + self.check_max_distribute_diff_vnode(colname) + else: + # self.check_max_distribute_diff_vnode(colname) # bug for tag + pass + + + def distribute_agg_query(self): + # basic filter + tdSql.query("select max(c1) from stb1 where c1 is null") + tdSql.checkRows(0) + + tdSql.query("select max(c1) from stb1 where t1=1") + tdSql.checkData(0,0,10) + + tdSql.query("select max(c1+c2) from stb1 where c1 =1 ") + tdSql.checkData(0,0,11112.000000000) + + tdSql.query("select max(c1) from stb1 where tbname=\"ct2\"") + tdSql.checkData(0,0,10) + + tdSql.query("select max(c1) from stb1 partition by tbname") + tdSql.checkRows(20) + + tdSql.query("select max(c1) from stb1 where t1> 4 partition by tbname") + tdSql.checkRows(15) + + # union all + tdSql.query("select max(c1) from stb1 union all select max(c1) from stb1 ") + tdSql.checkRows(2) + tdSql.checkData(0,0,28) + + # join + + tdSql.execute(" create database if not exists db ") + tdSql.execute(" use db ") + tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(" create table tb1 using st tags(1) ") + tdSql.execute(" create table 
tb2 using st tags(2) ") + + + for i in range(10): + ts = i*10 + self.ts + tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + + tdSql.query("select max(tb1.c1), tb2.c2 from tb1, tb2 where tb1.ts=tb2.ts") + tdSql.checkRows(1) + tdSql.checkData(0,0,9) + tdSql.checkData(0,0,9.00000) + + # group by + tdSql.execute(" use testdb ") + tdSql.query(" select max(c1),c1 from stb1 group by t1 ") + tdSql.checkRows(20) + tdSql.query(" select max(c1),c1 from stb1 group by c1 ") + tdSql.checkRows(30) + tdSql.query(" select max(c1),c2 from stb1 group by c2 ") + tdSql.checkRows(31) + + # partition by tbname or partition by tag + tdSql.query("select max(c1),tbname from stb1 partition by tbname") + query_data = tdSql.queryResult + + for row in query_data: + tbname = row[1] + tdSql.query(" select max(c1) from %s "%tbname) + tdSql.checkData(0,0,row[0]) + + tdSql.query("select max(c1),tbname from stb1 partition by t1") + query_data = tdSql.queryResult + + for row in query_data: + tbname = row[1] + tdSql.query(" select max(c1) from %s "%tbname) + tdSql.checkData(0,0,row[0]) + + # nest query for support max + tdSql.query("select abs(c2+2)+1 from (select max(c1) c2 from stb1)") + tdSql.checkData(0,0,31.000000000) + tdSql.query("select max(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)") + tdSql.checkData(0,0,31.000000000) + tdSql.query("select max(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)") + tdSql.checkData(0,0,31.000000000) + + # mixup with other functions + tdSql.query("select max(c1),count(c1),last(c2,c3) from stb1") + tdSql.checkData(0,0,28) + tdSql.checkData(0,1,184) + tdSql.checkData(0,2,-99999) + tdSql.checkData(0,3,-999) + + def run(self): + + self.prepare_datas_of_distribute() + self.check_distribute_datas() + self.check_max_status() + self.distribute_agg_query() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/distribute_agg_min.py b/tests/system-test/2-query/distribute_agg_min.py new file mode 100644 index 0000000000..8a458c74df --- /dev/null +++ b/tests/system-test/2-query/distribute_agg_min.py @@ -0,0 +1,294 @@ +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import random + + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143, + "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.vnode_disbutes = None + self.ts = 1537146000000 + + + def check_min_functions(self, tbname , col_name): + + min_sql = f"select min({col_name}) from {tbname};" + + same_sql = f"select {col_name} from {tbname} where {col_name} is not null order by {col_name} asc limit 1" + + tdSql.query(min_sql) + min_result = tdSql.queryResult + + tdSql.query(same_sql) + same_result = tdSql.queryResult + + if min_result !=same_result: + tdLog.exit(" min function work not as expected, sql : %s "% min_sql) + else: + tdLog.info(" min function work as expected, sql : %s "% min_sql) + + + def 
prepare_datas_of_distribute(self): + + # prepate datas for 20 tables distributed at different vgroups + tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") + tdSql.execute(" use testdb ") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(20): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + + for i in range(1,21): + if i ==1 or i == 4: + continue + else: + tbname = "ct"+f'{i}' + for j in range(9): + tdSql.execute( + f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, 
-999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + tdLog.info(" prepare data for distributed_aggregate done! ") + + def check_distribute_datas(self): + # get vgroup_ids of all + tdSql.query("show vgroups ") + vgroups = tdSql.queryResult + + vnode_tables={} + + for vgroup_id in vgroups: + vnode_tables[vgroup_id[0]]=[] + + + # check sub_table of per vnode ,make sure sub_table has been distributed + tdSql.query("show tables like 'ct%'") + table_names = tdSql.queryResult + tablenames = [] + for table_name in table_names: + vnode_tables[table_name[6]].append(table_name[0]) + self.vnode_disbutes = vnode_tables + + count = 0 + for k ,v in vnode_tables.items(): + if len(v)>=2: + count+=1 + if count < 2: + tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + + def check_min_distribute_diff_vnode(self,col_name): + + vgroup_ids = [] + for k ,v in self.vnode_disbutes.items(): + if len(v)>=2: + vgroup_ids.append(k) + + distribute_tbnames = [] + + for vgroup_id in vgroup_ids: + vnode_tables = self.vnode_disbutes[vgroup_id] + distribute_tbnames.append(random.sample(vnode_tables,1)[0]) + tbname_ins = "" + for tbname in distribute_tbnames: + tbname_ins += "'%s' ,"%tbname + + tbname_filters = tbname_ins[:-1] + + min_sql = f"select min({col_name}) from stb1 where tbname in ({tbname_filters});" + + same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null order by {col_name} asc limit 1" + + tdSql.query(min_sql) + min_result = tdSql.queryResult + + tdSql.query(same_sql) + same_result = tdSql.queryResult + + if min_result !=same_result: + tdLog.exit(" min function work not as expected, sql : %s "% min_sql) + else: + tdLog.info(" min function work as expected, sql : %s "% min_sql) + + def check_min_status(self): + # check max function work status + + tdSql.query("show tables like 'ct%'") + table_names = tdSql.queryResult + tablenames = [] + for table_name in table_names: + tablenames.append(table_name[0]) + + tdSql.query("desc stb1") + col_names = tdSql.queryResult + + colnames = [] + for col_name in col_names: + if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]: + colnames.append(col_name[0]) + + for tablename in tablenames: + for colname in colnames: + self.check_min_functions(tablename,colname) + + # check max function for different vnode + + for colname in colnames: + if colname.startswith("c"): + self.check_min_distribute_diff_vnode(colname) + else: + # self.check_min_distribute_diff_vnode(colname) # bug for tag + pass + + + def distribute_agg_query(self): + # basic filter + tdSql.query("select min(c1) from stb1 where c1 is null") + tdSql.checkRows(0) + + tdSql.query("select min(c1) from stb1 where t1=1") + tdSql.checkData(0,0,2) + + tdSql.query("select min(c1+c2) from stb1 where c1 =1 ") + tdSql.checkData(0,0,11112.000000000) + + tdSql.query("select min(c1) from stb1 where tbname=\"ct2\"") + tdSql.checkData(0,0,2) + + tdSql.query("select min(c1) from stb1 partition by tbname") + tdSql.checkRows(20) + + tdSql.query("select min(c1) from stb1 where t1> 4 partition by tbname") + tdSql.checkRows(15) + + # union all + tdSql.query("select min(c1) from stb1 union all select min(c1) from stb1 ") + tdSql.checkRows(2) + tdSql.checkData(0,0,0) + + # join + + tdSql.execute(" create database if not exists db ") + tdSql.execute(" use db ") + tdSql.execute(" create stable st (ts timestamp , 
c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(" create table tb1 using st tags(1) ") + tdSql.execute(" create table tb2 using st tags(2) ") + + + for i in range(10): + ts = i*10 + self.ts + tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + + tdSql.query("select min(tb1.c1), tb2.c2 from tb1, tb2 where tb1.ts=tb2.ts") + tdSql.checkRows(1) + tdSql.checkData(0,0,0) + tdSql.checkData(0,0,0.00000) + + # group by + tdSql.execute(" use testdb ") + tdSql.query(" select min(c1),c1 from stb1 group by t1 ") + tdSql.checkRows(20) + tdSql.query(" select min(c1),c1 from stb1 group by c1 ") + tdSql.checkRows(30) + tdSql.query(" select min(c1),c2 from stb1 group by c2 ") + tdSql.checkRows(31) + + # partition by tbname or partition by tag + tdSql.query("select min(c1),tbname from stb1 partition by tbname") + query_data = tdSql.queryResult + + for row in query_data: + tbname = row[1] + tdSql.query(" select min(c1) from %s "%tbname) + tdSql.checkData(0,0,row[0]) + + tdSql.query("select min(c1),tbname from stb1 partition by t1") + query_data = tdSql.queryResult + + for row in query_data: + tbname = row[1] + tdSql.query(" select min(c1) from %s "%tbname) + tdSql.checkData(0,0,row[0]) + + # nest query for support max + tdSql.query("select abs(c2+2)+1 from (select min(c1) c2 from stb1)") + tdSql.checkData(0,0,3.000000000) + tdSql.query("select min(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)") + tdSql.checkData(0,0,3.000000000) + tdSql.query("select min(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)") + tdSql.checkData(0,0,3.000000000) + + # mixup with other functions + tdSql.query("select max(c1),count(c1),last(c2,c3),min(c1) from stb1") + tdSql.checkData(0,0,28) + tdSql.checkData(0,1,184) + tdSql.checkData(0,2,-99999) + tdSql.checkData(0,3,-999) + tdSql.checkData(0,4,0) + + def run(self): + + self.prepare_datas_of_distribute() + self.check_distribute_datas() + self.check_min_status() + self.distribute_agg_query() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/distribute_agg_spread.py b/tests/system-test/2-query/distribute_agg_spread.py new file mode 100644 index 0000000000..94f1a61d77 --- /dev/null +++ b/tests/system-test/2-query/distribute_agg_spread.py @@ -0,0 +1,281 @@ +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import random + + +class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143, + "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.vnode_disbutes = None + self.ts = 1537146000000 + + + def check_spread_functions(self, tbname , col_name): + + spread_sql = f"select spread({col_name}) from {tbname};" + + same_sql = f"select max({col_name})-min({col_name}) from {tbname}" + + tdSql.query(spread_sql) + spread_result = tdSql.queryResult + + tdSql.query(same_sql) + same_result = tdSql.queryResult + + if spread_result !=same_result: + tdLog.exit(" max function 
work not as expected, sql : %s "% spread_sql) + else: + tdLog.info(" max function work as expected, sql : %s "% spread_sql) + + + def prepare_datas_of_distribute(self): + + # prepate datas for 20 tables distributed at different vgroups + tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") + tdSql.execute(" use testdb ") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(20): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + + for i in range(1,21): + if i ==1 or i == 4: + continue + else: + tbname = "ct"+f'{i}' + for j in range(9): + tdSql.execute( + f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" ) + ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, 
"binary8", "nchar8", "1969-01-01 01:00:00.000" ) + ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" ) + ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ''' + ) + + tdLog.info(" prepare data for distributed_aggregate done! ") + + def check_distribute_datas(self): + # get vgroup_ids of all + tdSql.query("show vgroups ") + vgroups = tdSql.queryResult + + vnode_tables={} + + for vgroup_id in vgroups: + vnode_tables[vgroup_id[0]]=[] + + + # check sub_table of per vnode ,make sure sub_table has been distributed + tdSql.query("show tables like 'ct%'") + table_names = tdSql.queryResult + tablenames = [] + for table_name in table_names: + vnode_tables[table_name[6]].append(table_name[0]) + self.vnode_disbutes = vnode_tables + + count = 0 + for k ,v in vnode_tables.items(): + if len(v)>=2: + count+=1 + if count < 2: + tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + + def check_spread_distribute_diff_vnode(self,col_name): + + vgroup_ids = [] + for k ,v in self.vnode_disbutes.items(): + if len(v)>=2: + vgroup_ids.append(k) + + distribute_tbnames = [] + + for vgroup_id in vgroup_ids: + vnode_tables = self.vnode_disbutes[vgroup_id] + distribute_tbnames.append(random.sample(vnode_tables,1)[0]) + tbname_ins = "" + for tbname in distribute_tbnames: + tbname_ins += "'%s' ,"%tbname + + tbname_filters = tbname_ins[:-1] + + spread_sql = f"select spread({col_name}) from stb1 where tbname in ({tbname_filters})" + + same_sql = f"select max({col_name}) - min({col_name}) from stb1 where tbname in ({tbname_filters})" + + tdSql.query(spread_sql) + spread_result = tdSql.queryResult + + tdSql.query(same_sql) + same_result = tdSql.queryResult + + if spread_result !=same_result: + tdLog.exit(" spread function work not as expected, sql : %s "% spread_sql) + else: + tdLog.info(" spread function work as expected, sql : %s "% spread_sql) + + def check_spread_status(self): + # check max function work status + + tdSql.query("show tables like 'ct%'") + table_names = tdSql.queryResult + tablenames = [] + for table_name in table_names: + tablenames.append(table_name[0]) + + tdSql.query("desc stb1") + col_names = tdSql.queryResult + + colnames = [] + for col_name in col_names: + if col_name[1] in ["INT" ,"BIGINT" ,"SMALLINT" ,"TINYINT" , "FLOAT" ,"DOUBLE"]: + colnames.append(col_name[0]) + + for tablename in tablenames: + for colname in colnames: + self.check_spread_functions(tablename,colname) + + # check max function for different vnode + + for colname in colnames: + if colname.startswith("c"): + self.check_spread_distribute_diff_vnode(colname) + else: + # self.check_spread_distribute_diff_vnode(colname) # bug for tag + pass + + + def distribute_agg_query(self): + # basic filter + tdSql.query("select spread(c1) from stb1 where c1 is null") + tdSql.checkRows(0) + + tdSql.query("select spread(c1) from stb1 where t1=1") + tdSql.checkData(0,0,8.000000000) + + tdSql.query("select spread(c1+c2) from stb1 where c1 =1 ") + tdSql.checkData(0,0,0.000000000) + + tdSql.query("select spread(c1) from stb1 where tbname=\"ct2\"") + tdSql.checkData(0,0,8.000000000) + + tdSql.query("select spread(c1) from stb1 partition by tbname") + tdSql.checkRows(20) + + tdSql.query("select spread(c1) from stb1 where t1> 4 partition by tbname") + tdSql.checkRows(15) + + # union all + tdSql.query("select spread(c1) from stb1 union all select max(c1)-min(c1) from stb1 ") + tdSql.checkRows(2) + 
+        tdSql.checkData(0,0,28.000000000)
+
+        # join
+        tdSql.execute(" create database if not exists db ")
+        tdSql.execute(" use db ")
+        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
+        tdSql.execute(" create table tb1 using st tags(1) ")
+        tdSql.execute(" create table tb2 using st tags(2) ")
+
+        for i in range(10):
+            ts = i*10 + self.ts
+            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
+            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
+
+        tdSql.query("select spread(tb1.c1), spread(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
+        tdSql.checkRows(1)
+        tdSql.checkData(0,0,9.000000000)
+        tdSql.checkData(0,1,9.000000000)
+
+        # group by
+        tdSql.execute(" use testdb ")
+        tdSql.query(" select max(c1),c1 from stb1 group by t1 ")
+        tdSql.checkRows(20)
+        tdSql.query(" select max(c1),c1 from stb1 group by c1 ")
+        tdSql.checkRows(30)
+        tdSql.query(" select max(c1),c2 from stb1 group by c2 ")
+        tdSql.checkRows(31)
+
+        # partition by tbname or partition by tag
+        tdSql.query("select spread(c1) from stb1 partition by tbname")
+        query_data = tdSql.queryResult
+
+        # nested query support for spread
+        tdSql.query("select spread(c2+2)+1 from (select max(c1) c2 from stb1)")
+        tdSql.checkData(0,0,1.000000000)
+        tdSql.query("select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)")
+        tdSql.checkData(0,0,29.000000000)
+        tdSql.query("select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)")
+        tdSql.checkData(0,0,29.000000000)
+
+        # mix with other functions
+        tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1) from stb1")
+        tdSql.checkData(0,0,28)
+        tdSql.checkData(0,1,184)
+        tdSql.checkData(0,2,-99999)
+        tdSql.checkData(0,3,-999)
+        tdSql.checkData(0,4,28.000000000)
+
+    def run(self):
+
+        self.prepare_datas_of_distribute()
+        self.check_distribute_datas()
+        self.check_spread_status()
+        self.distribute_agg_query()
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/distribute_agg_sum.py b/tests/system-test/2-query/distribute_agg_sum.py
new file mode 100644
index 0000000000..add4d75c61
--- /dev/null
+++ b/tests/system-test/2-query/distribute_agg_sum.py
@@ -0,0 +1,278 @@
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+import random, os, sys
+import platform
+
+
+class TDTestCase:
+    updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
+                     "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
+                     "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143,
+                     "maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}
+
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+        self.vnode_disbutes = None
+        self.ts = 1537146000000
+
+    def check_sum_functions(self, tbname, col_name):
+        # cross-check the server-side sum() against a client-side numpy sum
+        sum_sql = f"select sum({col_name}) from {tbname};"
+
+        same_sql = f"select {col_name} from {tbname} where {col_name} is not null "
+
+        tdSql.query(same_sql)
+        pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+        if (platform.system().lower() == 'windows' and pre_data.dtype == 'int32'):
+            pre_data = np.array(pre_data, dtype = 'int64')
+        pre_sum = np.sum(pre_data)
+
+        tdSql.query(sum_sql)
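+        # The int32 -> int64 widening above matters on Windows, where numpy
+        # defaults integer arrays (and their sum accumulators) to int32, so a
+        # large column total would silently wrap. A minimal sketch of the
+        # guarded behaviour, assuming only numpy:
+        wrapped = np.array([2**31 - 1, 1], dtype='int32')
+        assert int(np.sum(np.array(wrapped, dtype='int64'))) == 2**31  # no int32 wraparound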
+        tdSql.checkData(0,0,pre_sum)
+
+    def prepare_datas_of_distribute(self):
+
+        # prepare data for 20 tables distributed across different vgroups
+        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
+        tdSql.execute(" use testdb ")
+        tdSql.execute(
+            '''create table stb1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
+            '''
+        )
+
+        tdSql.execute(
+            '''
+            create table t1
+            (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
+            '''
+        )
+        for i in range(20):
+            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+
+        for i in range(9):
+            tdSql.execute(
+                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+            )
+            tdSql.execute(
+                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+            )
+
+        for i in range(1,21):
+            if i == 1 or i == 4:
+                continue
+            else:
+                tbname = "ct"+f'{i}'
+                for j in range(9):
+                    tdSql.execute(
+                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
+                    )
+        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+
+        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+
+        tdSql.execute(
+            f'''insert into t1 values
+            ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+            ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
+            ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
+            ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
+            ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
+            ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+            ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
+            ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
+            ( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
+            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
+            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+            '''
+        )
+
+        tdLog.info(" prepare data for distributed_aggregate done! ")
+
+    def check_distribute_datas(self):
+        # collect the vgroup id of every vnode
+        tdSql.query("show vgroups ")
+        vgroups = tdSql.queryResult
+
+        vnode_tables = {}
+
+        for vgroup_id in vgroups:
+            vnode_tables[vgroup_id[0]] = []
+
+        # map sub_tables to their vnodes and make sure the sub_tables are really distributed
+        tdSql.query("show tables like 'ct%'")
+        table_names = tdSql.queryResult
+        for table_name in table_names:
+            vnode_tables[table_name[6]].append(table_name[0])
+        self.vnode_disbutes = vnode_tables
+
+        count = 0
+        for k, v in vnode_tables.items():
+            if len(v) >= 2:
+                count += 1
+        if count < 2:
+            tdLog.exit(" test data is not distributed enough: fewer than two vnodes hold multiple sub_tables ")
+
+    def check_sum_distribute_diff_vnode(self, col_name):
+        # pick one sub_table from every vnode that holds at least two, then
+        # cross-check sum() across those vnodes against a client-side numpy sum
+        vgroup_ids = []
+        for k, v in self.vnode_disbutes.items():
+            if len(v) >= 2:
+                vgroup_ids.append(k)
+
+        distribute_tbnames = []
+
+        for vgroup_id in vgroup_ids:
+            vnode_tables = self.vnode_disbutes[vgroup_id]
+            distribute_tbnames.append(random.sample(vnode_tables, 1)[0])
+        tbname_ins = ""
+        for tbname in distribute_tbnames:
+            tbname_ins += "'%s' ," % tbname
+
+        tbname_filters = tbname_ins[:-1]
+
+        sum_sql = f"select sum({col_name}) from stb1 where tbname in ({tbname_filters});"
+
+        same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null "
+
+        tdSql.query(same_sql)
+        pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+        if (platform.system().lower() == 'windows' and pre_data.dtype == 'int32'):
+            pre_data = np.array(pre_data, dtype = 'int64')
+        pre_sum = np.sum(pre_data)
+
+        tdSql.query(sum_sql)
+        tdSql.checkData(0,0,pre_sum)
+
+    def check_sum_status(self):
+        # check that the sum function works on every sub_table and numeric column
+        tdSql.query("show tables like 'ct%'")
+        table_names = tdSql.queryResult
+        tablenames = []
+        for table_name in table_names:
+            tablenames.append(table_name[0])
+
+        tdSql.query("desc stb1")
+        col_names = tdSql.queryResult
+
+        colnames = []
+        for col_name in col_names:
+            if col_name[1] in ["INT", "BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE"]:
+                colnames.append(col_name[0])
+
+        for tablename in tablenames:
+            for colname in colnames:
+                self.check_sum_functions(tablename, colname)
+
+        # check the sum function across tables in different vnodes
+        for colname in colnames:
+            if colname.startswith("c"):
+                self.check_sum_distribute_diff_vnode(colname)
+            else:
+                # self.check_sum_distribute_diff_vnode(colname) # bug for tag
+                pass
+
+    def distribute_agg_query(self):
+        # basic filter
+        tdSql.query(" select sum(c1) from stb1 ")
+        tdSql.checkData(0,0,2592)
+
+        tdSql.query(" select sum(a) from (select sum(c1) a from stb1 partition by tbname) ")
+        tdSql.checkData(0,0,2592)
+
+        tdSql.query(" select sum(c1) from stb1 where t1=1")
+        tdSql.checkData(0,0,54)
+
+        tdSql.query("select sum(c1+c2) from stb1 where c1 =1 ")
+        tdSql.checkData(0,0,22224.000000000)
+
+        tdSql.query("select sum(c1) from stb1 where tbname=\"ct2\"")
+        tdSql.checkData(0,0,54)
+
+        tdSql.query("select sum(c1) from stb1 partition by tbname")
+        tdSql.checkRows(20)
+
+        tdSql.query("select sum(c1) from stb1 where t1> 4 partition by tbname")
+        tdSql.checkRows(15)
+
+        # union all
+        tdSql.query("select sum(c1) from stb1 union all select sum(c1) from stb1 ")
+        tdSql.checkRows(2)
+        tdSql.checkData(0,0,2592)
+
+        tdSql.query("select
sum(a) from (select sum(c1) a from stb1 union all select sum(c1) a from stb1)") + tdSql.checkRows(1) + tdSql.checkData(0,0,5184) + + # join + + tdSql.execute(" create database if not exists db ") + tdSql.execute(" use db ") + tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(" create table tb1 using st tags(1) ") + tdSql.execute(" create table tb2 using st tags(2) ") + + + for i in range(10): + ts = i*10 + self.ts + tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + + tdSql.query("select sum(tb1.c1), sum(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts") + tdSql.checkRows(1) + tdSql.checkData(0,0,45) + tdSql.checkData(0,1,45.000000000) + + # group by + tdSql.execute(" use testdb ") + + # partition by tbname or partition by tag + tdSql.query("select sum(c1) from stb1 partition by tbname") + tdSql.checkRows(20) + + # nest query for support max + tdSql.query("select abs(c2+2)+1 from (select sum(c1) c2 from stb1)") + tdSql.checkData(0,0,2595.000000000) + tdSql.query("select sum(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)") + tdSql.checkData(0,0,2960.000000000) + tdSql.query("select sum(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)") + tdSql.checkData(0,0,2960.000000000) + + # mixup with other functions + tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2) from stb1") + tdSql.checkData(0,0,28) + tdSql.checkData(0,1,184) + tdSql.checkData(0,2,-99999) + tdSql.checkData(0,3,-999) + tdSql.checkData(0,4,28202310.000000000) + + def run(self): + + self.prepare_datas_of_distribute() + self.check_distribute_datas() + self.check_sum_status() + self.distribute_agg_query() + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/first.py b/tests/system-test/2-query/first.py index 7227d1afb5..e9a8cc950b 100644 --- a/tests/system-test/2-query/first.py +++ b/tests/system-test/2-query/first.py @@ -11,8 +11,11 @@ # -*- coding: utf-8 -*- +import random +import string import sys import taos +from util.common import * from util.log import * from util.cases import * from util.sql import * @@ -25,124 +28,159 @@ class TDTestCase: tdSql.init(conn.cursor()) self.rowNum = 10 + self.tbnum = 20 self.ts = 1537146000000 - - def run(self): + self.binary_str = 'taosdata' + self.nchar_str = '涛思数据' + + def first_check_base(self): tdSql.prepare() - - tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') - tdSql.execute("create table test1 using test tags('beijing')") - tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1)) - - # first verifacation - # bug TD-15957 - tdSql.query("select first(*) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 1, None) - - tdSql.query("select first(col1) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col2) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col3) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col4) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col11) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col12) from test1") - tdSql.checkRows(0) - - tdSql.query("select 
first(col13) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col14) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col5) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col6) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col7) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col8) from test1") - tdSql.checkRows(0) - - tdSql.query("select first(col9) from test1") - tdSql.checkRows(0) - + column_dict = { + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': 'binary(20)', + 'col13': 'nchar(20)' + } + tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') + tdSql.execute("create table stb_1 using stb tags('beijing')") + tdSql.execute("insert into stb_1(ts) values(%d)" % (self.ts - 1)) + column_list = ['col1','col2','col3','col4','col5','col6','col7','col8','col9','col10','col11','col12','col13'] + for i in ['stb_1','db.stb_1','stb_1','db.stb_1']: + tdSql.query(f"select first(*) from {i}") + tdSql.checkRows(1) + tdSql.checkData(0, 1, None) + #!bug TD-16561 + # for i in ['stb','db.stb']: + # tdSql.query(f"select first(*) from {i}") + # tdSql.checkRows(1) + # tdSql.checkData(0, 1, None) + for i in column_list: + for j in ['stb_1','db.stb_1','stb_1','db.stb_1']: + tdSql.query(f"select first({i}) from {j}") + tdSql.checkRows(0) for i in range(self.rowNum): - tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - - tdSql.query("select first(*) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 1, 1) - - tdSql.query("select first(col1) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 1) - - tdSql.query("select first(col2) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 1) - - tdSql.query("select first(col3) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 1) - - tdSql.query("select first(col4) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 1) - - tdSql.query("select first(col11) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 1) - - tdSql.query("select first(col12) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 1) - - tdSql.query("select first(col13) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 1) - - tdSql.query("select first(col14) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 1) - - tdSql.query("select first(col5) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 0.1) - - tdSql.query("select first(col6) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 0.1) - - tdSql.query("select first(col7) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, False) - - tdSql.query("select first(col8) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 'taosdata1') - - tdSql.query("select first(col9) from test1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, '涛思数据1') - - - tdSql.query("select first(*),last(*) from test1 where ts < 23 interval(1s)") + tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, 
%d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + for k, v in column_dict.items(): + for j in ['stb_1', 'db.stb_1', 'stb', 'db.stb']: + tdSql.query(f"select first({k}) from {j}") + tdSql.checkRows(1) + # tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned + if v == 'tinyint' or v == 'smallint' or v == 'int' or v == 'bigint' or v == 'tinyint unsigned' or v == 'smallint unsigned'\ + or v == 'int unsigned' or v == 'bigint unsigned': + tdSql.checkData(0, 0, 1) + # float,double + elif v == 'float' or v == 'double': + tdSql.checkData(0, 0, 0.1) + # bool + elif v == 'bool': + tdSql.checkData(0, 0, False) + # binary + elif 'binary' in v: + tdSql.checkData(0, 0, f'{self.binary_str}1') + # nchar + elif 'nchar' in v: + tdSql.checkData(0, 0, f'{self.nchar_str}1') + #!bug TD-16569 + tdSql.query("select first(*),last(*) from stb where ts < 23 interval(1s)") tdSql.checkRows(0) + tdSql.execute('drop database db') + def first_check_stb_distribute(self): + # prepare data for vgroup 4 + dbname = tdCom.getLongName(10, "letters") + stbname = tdCom.getLongName(5, "letters") + child_table_num = 20 + vgroup = 2 + column_dict = { + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': 'binary(20)', + 'col13': 'nchar(20)' + } + tdSql.execute(f"create database if not exists {dbname} vgroups {vgroup}") + tdSql.execute(f'use {dbname}') + # build 20 child tables,every table insert 10 rows + tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') + for i in range(child_table_num): + tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')") + tdSql.execute(f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i)) + #!bug TD-16561 + # for i in [f'{stbname}', f'{dbname}.{stbname}']: + # tdSql.query(f"select first(*) from {i}") + # tdSql.checkRows(1) + # tdSql.checkData(0, 1, None) + tdSql.query('show tables') + vgroup_list = [] + for i in range(len(tdSql.queryResult)): + vgroup_list.append(tdSql.queryResult[i][6]) + vgroup_list_set = set(vgroup_list) + # print(vgroup_list_set) + # print(vgroup_list) + for i in vgroup_list_set: + vgroups_num = vgroup_list.count(i) + if vgroups_num >=2: + tdLog.info(f'This scene with {vgroups_num} vgroups is ok!') + continue + else: + tdLog.exit('This scene does not meet the requirements with {vgroups_num} vgroup!\n') + + for i in range(child_table_num): + for j in range(self.rowNum): + tdSql.execute(f"insert into {stbname}_{i} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + j + i, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 0.1, j + 0.1, j % 2, j + 1, j + 1)) + + for k, v in column_dict.items(): + for j in [f'{stbname}_{i}', f'{dbname}.{stbname}_{i}', f'{stbname}', f'{dbname}.{stbname}']: + tdSql.query(f"select first({k}) from {j}") + tdSql.checkRows(1) + # tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned + if v == 'tinyint' 
or v == 'smallint' or v == 'int' or v == 'bigint' or v == 'tinyint unsigned' or v == 'smallint unsigned'\ + or v == 'int unsigned' or v == 'bigint unsigned': + tdSql.checkData(0, 0, 1) + # float,double + elif v == 'float' or v == 'double': + tdSql.checkData(0, 0, 0.1) + # bool + elif v == 'bool': + tdSql.checkData(0, 0, False) + # binary + elif 'binary' in v: + tdSql.checkData(0, 0, f'{self.binary_str}1') + # nchar + elif 'nchar' in v: + tdSql.checkData(0, 0, f'{self.nchar_str}1') + #!bug TD-16569 + # tdSql.query(f"select first(*),last(*) from {stbname} where ts < 23 interval(1s)") + # tdSql.checkRows(0) + tdSql.execute(f'drop database {dbname}') + + + + pass + def run(self): + self.first_check_base() + self.first_check_stb_distribute() + def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/histogram.py b/tests/system-test/2-query/histogram.py index 0448952be8..bc061f8fb7 100644 --- a/tests/system-test/2-query/histogram.py +++ b/tests/system-test/2-query/histogram.py @@ -44,8 +44,7 @@ class TDTestCase: buildPath = root[:len(root) - len("/build/bin")] break return buildPath - - def run(self): + def histogram_check_base(self): print("running {}".format(__file__)) tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db") @@ -3183,6 +3182,72 @@ class TDTestCase: tdSql.execute('drop database db') + + def histogram_check_distribute(self): + dbname = "db" + stbname = "stb" + row_num = 10 + child_table_num = 20 + vgroups = 2 + user_input_json = "[1,3,5,7]" + ts = 1537146000000 + binary_str = 'taosdata' + nchar_str = '涛思数据' + column_dict = { + 'ts' : 'timestamp', + 'col1' : 'tinyint', + 'col2' : 'smallint', + 'col3' : 'int', + 'col4' : 'bigint', + 'col5' : 'tinyint unsigned', + 'col6' : 'smallint unsigned', + 'col7' : 'int unsigned', + 'col8' : 'bigint unsigned', + 'col9' : 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': 'binary(20)', + 'col13': 'nchar(20)' + } + tdSql.execute(f"create database if not exists {dbname} vgroups {vgroups}") + tdSql.execute(f'use {dbname}') + # build 20 child tables,every table insert 10 rows + tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') + for i in range(child_table_num): + tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')") + tdSql.query('show tables') + vgroup_list = [] + for i in range(len(tdSql.queryResult)): + vgroup_list.append(tdSql.queryResult[i][6]) + vgroup_list_set = set(vgroup_list) + for i in vgroup_list_set: + vgroups_num = vgroup_list.count(i) + if vgroups_num >=2: + tdLog.info(f'This scene with {vgroups_num} vgroups is ok!') + continue + else: + tdLog.exit(f'This scene does not meet the requirements with {vgroups_num} vgroup!\n') + for i in range(child_table_num): + for j in range(row_num): + tdSql.execute(f"insert into {stbname}_{i} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{binary_str}%d', '{nchar_str}%d')" + % (ts + j + i, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 0.1, j + 0.1, j % 2, j + 1, j + 1)) + # user_input + for k,v in column_dict.items(): + if v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or v.lower() =='float' or v.lower() =='double'\ + or v.lower() =='tinyint unsigned' or v.lower() =='smallint unsigned' or v.lower() =='int 
unsigned' or v.lower() =='bigint unsigned': + tdSql.query(f'select histogram({k}, "user_input", "{user_input_json}", 0) from {stbname}') + tdSql.checkRows(len(user_input_json[1:-1].split(','))-1) + elif 'binary' in v.lower() or 'nchar' in v.lower() or 'bool' == v.lower(): + tdSql.error(f'select histogram({k}, "user_input", "{user_input_json}", 0) from {stbname}') + + tdSql.execute(f'drop database {dbname}') + + + def run(self): + self.histogram_check_base() + self.histogram_check_distribute() + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/2-query/hyperloglog.py b/tests/system-test/2-query/hyperloglog.py index 337db140a1..8dd6bd2dda 100644 --- a/tests/system-test/2-query/hyperloglog.py +++ b/tests/system-test/2-query/hyperloglog.py @@ -214,6 +214,79 @@ class TDTestCase: for i in range(4): tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2} + def __create_stable(self,stbname='stb',column_dict={'ts':'timestamp','col1': 'tinyint','col2': 'smallint','col3': 'int', + 'col4': 'bigint','col5': 'tinyint unsigned','col6': 'smallint unsigned','col7': 'int unsigned', + 'col8': 'bigint unsigned','col9': 'float','col10': 'double','col11': 'bool','col12': 'binary(20)','col13': 'nchar(20)'}, + tag_dict={'ts_tag':'timestamp','t1': 'tinyint','t2': 'smallint','t3': 'int', + 't4': 'bigint','t5': 'tinyint unsigned','t6': 'smallint unsigned','t7': 'int unsigned', + 't8': 'bigint unsigned','t9': 'float','t10': 'double','t11': 'bool','t12': 'binary(20)','t13': 'nchar(20)'}): + column_sql = '' + tag_sql = '' + for k,v in column_dict.items(): + column_sql += f"{k} {v}," + for k,v in tag_dict.items(): + tag_sql += f"{k} {v}," + tdSql.execute(f'create table if not exists {stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})') + + def __insert_data(self): + + pass + + def __hyperloglog_check_distribute(self): + dbname = "dbtest" + stbname = "stb" + childtable_num = 20 + vgroups_num = 4 + row_num = 10 + ts = 1537146000000 + binary_str = 'taosdata' + nchar_str = '涛思数据' + column_dict = { + 'ts':'timestamp', + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': 'binary(20)', + 'col13': 'nchar(20)' + } + tag_dict = { + 'loc':'nchar(20)' + } + tdSql.execute(f"create database if not exists {dbname} vgroups {vgroups_num}") + tdSql.execute(f'use {dbname}') + self.__create_stable(stbname,column_dict,tag_dict) + for i in range(childtable_num): + tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')") + tdSql.query('show tables') + vgroup_list = [] + for i in range(len(tdSql.queryResult)): + vgroup_list.append(tdSql.queryResult[i][6]) + vgroup_list_set = set(vgroup_list) + for i in vgroup_list_set: + vgroups_num = vgroup_list.count(i) + if vgroups_num >=2: + tdLog.info(f'This scene with {vgroups_num} vgroups is ok!') + continue + else: + tdLog.exit('This scene does not meet the requirements with {vgroups_num} vgroup!\n') + for i in range(row_num): + tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{binary_str}%d', '{nchar_str}%d')" + % (ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + for k in column_dict.keys(): + tdSql.query(f"select hyperloglog({k}) from 
{stbname}") + tdSql.checkRows(1) + tdSql.query(f"select hyperloglog({k}) from {stbname} group by {k}") + + tdSql.execute(f'drop database {dbname}') + def __insert_data(self, rows): now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) @@ -311,6 +384,10 @@ class TDTestCase: tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() + tdLog.printNoPrefix("==========step5: distribute scene check") + self.__hyperloglog_check_distribute() + + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/2-query/last.py b/tests/system-test/2-query/last.py index 4ef13e9142..ee65d22a22 100644 --- a/tests/system-test/2-query/last.py +++ b/tests/system-test/2-query/last.py @@ -1,6 +1,9 @@ +import random +import string from util.log import * from util.cases import * from util.sql import * +from util.common import * import numpy as np @@ -10,416 +13,258 @@ class TDTestCase: tdSql.init(conn.cursor()) self.rowNum = 10 + self.tbnum = 20 self.ts = 1537146000000 - - def run(self): + self.binary_str = 'taosdata' + self.nchar_str = '涛思数据' + + def set_create_normaltable_sql(self, ntbname, column_dict): + column_sql = '' + for k, v in column_dict.items(): + column_sql += f"{k} {v}," + create_ntb_sql = f'create table {ntbname} (ts timestamp,{column_sql[:-1]})' + return create_ntb_sql + + def set_create_stable_sql(self,stbname,column_dict,tag_dict): + column_sql = '' + tag_sql = '' + for k,v in column_dict.items(): + column_sql += f"{k} {v}," + for k,v in tag_dict.items(): + tag_sql += f"{k} {v}," + create_stb_sql = f'create table {stbname} (ts timestamp,{column_sql[:-1]}) tags({tag_sql[:-1]})' + return create_stb_sql + + def last_check_stb_tb_base(self): tdSql.prepare() + stbname = tdCom.getLongName(5, "letters") + column_dict = { + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': 'binary(20)', + 'col13': 'nchar(20)' + } + tag_dict = { + 'loc':'nchar(20)' + } + tdSql.execute(self.set_create_stable_sql(stbname,column_dict,tag_dict)) - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') - tdSql.execute("create table stb_1 using stb tags('beijing')") - tdSql.execute("insert into stb_1(ts) values(%d)" % (self.ts - 1)) + tdSql.execute(f"create table {stbname}_1 using {stbname} tags('beijing')") + tdSql.execute(f"insert into {stbname}_1(ts) values(%d)" % (self.ts - 1)) - # last verifacation - tdSql.query("select last(*) from stb_1") + for i in [f'{stbname}_1', f'db.{stbname}_1']: + tdSql.query(f"select last(*) from {i}") + tdSql.checkRows(1) + tdSql.checkData(0, 1, None) + #!bug TD-16561 + # for i in ['stb','db.stb','stb','db.stb']: + # tdSql.query(f"select last(*) from {i}") + # tdSql.checkRows(1) + # tdSql.checkData(0, 1, None) + for i in column_dict.keys(): + for j in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']: + tdSql.query(f"select last({i}) from {j}") + tdSql.checkRows(0) + tdSql.query(f"select last({list(column_dict.keys())[0]}) from {stbname}_1 group by {list(column_dict.keys())[-1]}") tdSql.checkRows(1) - tdSql.checkData(0, 1, None) - 
tdSql.query("select last(*) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 1, None) - tdSql.query("select last(col1) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col1) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col2) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col2) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col3) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col3) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col4) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col4) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col11) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col11) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col12) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col12) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col13) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col13) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col14) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col14) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col5) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col5) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col6) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col6) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col7) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col7) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col8) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col8) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col9) from stb_1") - tdSql.checkRows(0) - tdSql.query("select last(col9) from db.stb_1") - tdSql.checkRows(0) - tdSql.query("select count(col1) from stb_1 group by col7") - tdSql.checkRows(1) - for i in range(self.rowNum): - tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + tdSql.execute(f"insert into {stbname}_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + for i in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']: + tdSql.query(f"select last(*) from {i}") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 10) + for k, v in column_dict.items(): + for j in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']: + tdSql.query(f"select last({k}) from {j}") + tdSql.checkRows(1) + # tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned + if v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned'\ + or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned': + tdSql.checkData(0, 0, 10) + # float,double + elif v.lower() == 'float' or v.lower() == 'double': + tdSql.checkData(0, 0, 9.1) + # bool + elif v.lower() == 'bool': + tdSql.checkData(0, 0, True) + # binary + elif 'binary' in v.lower(): + tdSql.checkData(0, 0, f'{self.binary_str}{self.rowNum}') + # nchar + elif 'nchar' in v.lower(): + tdSql.checkData(0, 0, 
f'{self.nchar_str}{self.rowNum}') + for i in [f'{stbname}_1', f'db.{stbname}_1', f'{stbname}', f'db.{stbname}']: + tdSql.query(f"select last({list(column_dict.keys())[0]},{list(column_dict.keys())[1]},{list(column_dict.keys())[2]}) from {stbname}_1") + tdSql.checkData(0, 2, 10) - tdSql.query("select last(*) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 1, 10) - tdSql.query("select last(*) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 1, 10) - tdSql.query("select last(col1) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col1) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col2) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col2) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col3) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col3) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col4) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col4) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col11) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col11) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col12) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col12) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col13) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col13) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col14) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col14) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col5) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col5) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col6) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col6) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col7) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, True) - tdSql.query("select last(col7) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, True) - tdSql.query("select last(col8) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 'taosdata10') - tdSql.query("select last(col8) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 'taosdata10') - tdSql.query("select last(col9) from stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, '涛思数据10') - tdSql.query("select last(col9) from db.stb_1") - tdSql.checkRows(1) - tdSql.checkData(0, 0, '涛思数据10') - tdSql.query("select last(col1,col2,col3) from stb_1") - tdSql.checkData(0,2,10) + tdSql.error(f"select {list(column_dict.keys())[0]} from {stbname} where last({list(column_dict.keys())[12]})='涛思数据10'") + tdSql.error(f"select {list(column_dict.keys())[0]} from {stbname}_1 where last({list(column_dict.keys())[12]})='涛思数据10'") + tdSql.execute('drop database db') - tdSql.query("select last(*) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 1, 10) - tdSql.query("select last(*) from db.stb") - tdSql.checkRows(1) - 
tdSql.checkData(0, 1, 10) - tdSql.query("select last(col1) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col1) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col2) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col2) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col3) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col3) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col4) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col4) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col11) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col11) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col12) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col12) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col13) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col13) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col14) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col14) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col5) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col5) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col6) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col6) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col7) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, True) - tdSql.query("select last(col7) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, True) - tdSql.query("select last(col8) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 'taosdata10') - tdSql.query("select last(col8) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 'taosdata10') - tdSql.query("select last(col9) from stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, '涛思数据10') - tdSql.query("select last(col9) from db.stb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, '涛思数据10') - tdSql.query("select last(col1,col2,col3) from stb") - tdSql.checkData(0,2,10) - - - tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') - tdSql.execute("insert into ntb(ts) values(%d)" % (self.ts - 1)) - tdSql.query("select last(*) from ntb") + def last_check_ntb_base(self): + tdSql.prepare() + ntbname = tdCom.getLongName(5, "letters") + column_dict = { + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': 'binary(20)', + 'col13': 'nchar(20)' + } + create_ntb_sql = self.set_create_normaltable_sql(ntbname, column_dict) + tdSql.execute(create_ntb_sql) + tdSql.execute(f"insert into 
{ntbname}(ts) values(%d)" % (self.ts - 1)) + tdSql.query(f"select last(*) from {ntbname}") tdSql.checkRows(1) tdSql.checkData(0, 1, None) - tdSql.query("select last(*) from db.ntb") + tdSql.query(f"select last(*) from db.{ntbname}") tdSql.checkRows(1) tdSql.checkData(0, 1, None) - tdSql.query("select last(col1) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col1) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col2) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col2) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col3) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col3) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col4) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col4) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col11) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col11) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col12) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col12) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col13) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col13) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col14) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col14) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col5) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col5) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col6) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col6) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col7) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col7) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col8) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col8) from db.ntb") - tdSql.checkRows(0) - tdSql.query("select last(col9) from ntb") - tdSql.checkRows(0) - tdSql.query("select last(col9) from db.ntb") - tdSql.checkRows(0) - + for i in column_dict.keys(): + for j in [f'{ntbname}', f'db.{ntbname}']: + tdSql.query(f"select last({i}) from {j}") + tdSql.checkRows(0) for i in range(self.rowNum): - tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - - tdSql.query("select last(*) from ntb") + tdSql.execute(f"insert into {ntbname} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + tdSql.query(f"select last(*) from {ntbname}") tdSql.checkRows(1) tdSql.checkData(0, 1, 10) - tdSql.query("select last(*) from db.ntb") + tdSql.query(f"select last(*) from db.{ntbname}") tdSql.checkRows(1) tdSql.checkData(0, 1, 10) - tdSql.query("select last(col1) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col1) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col2) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col2) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col3) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col3) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col4) from 
ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col4) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col11) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col11) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col12) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col12) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col13) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col13) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col14) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col14) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 10) - tdSql.query("select last(col5) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col5) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col6) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col6) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 9.1) - tdSql.query("select last(col7) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, True) - tdSql.query("select last(col7) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, True) - tdSql.query("select last(col8) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 'taosdata10') - tdSql.query("select last(col8) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 'taosdata10') - tdSql.query("select last(col9) from ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, '涛思数据10') - tdSql.query("select last(col9) from db.ntb") - tdSql.checkRows(1) - tdSql.checkData(0, 0, '涛思数据10') - tdSql.query("select last(col1,col2,col3) from ntb") - tdSql.checkData(0,2,10) + for k, v in column_dict.items(): + for j in [f'{ntbname}', f'db.{ntbname}']: + tdSql.query(f"select last({k}) from {j}") + tdSql.checkRows(1) + # tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned + if v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned'\ + or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned': + tdSql.checkData(0, 0, 10) + # float,double + elif v.lower() == 'float' or v.lower() == 'double': + tdSql.checkData(0, 0, 9.1) + # bool + elif v.lower() == 'bool': + tdSql.checkData(0, 0, True) + # binary + elif 'binary' in v.lower(): + tdSql.checkData(0, 0, f'{self.binary_str}{self.rowNum}') + # nchar + elif 'nchar' in v.lower(): + tdSql.checkData(0, 0, f'{self.nchar_str}{self.rowNum}') + + tdSql.error( + f"select {list(column_dict.keys())[0]} from {ntbname} where last({list(column_dict.keys())[9]})='涛思数据10'") + + def last_check_stb_distribute(self): + # prepare data for vgroup 4 + dbname = tdCom.getLongName(10, "letters") + stbname = tdCom.getLongName(5, "letters") + vgroup_num = 4 + column_dict = { + 'col1': 'tinyint', + 'col2': 'smallint', + 'col3': 'int', + 'col4': 'bigint', + 'col5': 'tinyint unsigned', + 'col6': 'smallint unsigned', + 'col7': 'int unsigned', + 'col8': 'bigint unsigned', + 'col9': 'float', + 'col10': 'double', + 'col11': 'bool', + 'col12': 'binary(20)', + 'col13': 'nchar(20)' + } + + tdSql.execute( + f"create database if not exists {dbname} vgroups 
{vgroup_num}") + tdSql.execute(f'use {dbname}') + + # build 20 child tables,every table insert 10 rows + tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') + for i in range(self.tbnum): + tdSql.execute( + f"create table {stbname}_{i} using {stbname} tags('beijing')") + tdSql.execute( + f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i)) + # for i in [f'{stbname}', f'{dbname}.{stbname}']: + # tdSql.query(f"select last(*) from {i}") + # tdSql.checkRows(1) + # tdSql.checkData(0, 1, None) + tdSql.query('show tables') + vgroup_list = [] + for i in range(len(tdSql.queryResult)): + vgroup_list.append(tdSql.queryResult[i][6]) + vgroup_list_set = set(vgroup_list) + for i in vgroup_list_set: + vgroups_num = vgroup_list.count(i) + if vgroups_num >= 2: + tdLog.info(f'This scene with {vgroups_num} vgroups is ok!') + continue + else: + tdLog.exit( + 'This scene does not meet the requirements with {vgroups_num} vgroup!\n') + + for i in range(self.tbnum): + for j in range(self.rowNum): + tdSql.execute(f"insert into {stbname}_{i} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + j + i, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 1, j + 0.1, j + 0.1, j % 2, j + 1, j + 1)) + for i in [f'{stbname}', f'{dbname}.{stbname}']: + tdSql.query(f"select last(*) from {i}") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 10) + for k, v in column_dict.items(): + for j in [f'{stbname}', f'{dbname}.{stbname}']: + tdSql.query(f"select last({k}) from {j}") + tdSql.checkRows(1) + # tinyint,smallint,int,bigint,tinyint unsigned,smallint unsigned,int unsigned,bigint unsigned + if v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned'\ + or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned': + tdSql.checkData(0, 0, 10) + # float,double + elif v.lower() == 'float' or v.lower() == 'double': + tdSql.checkData(0, 0, 9.1) + # bool + elif v.lower() == 'bool': + tdSql.checkData(0, 0, True) + # binary + elif 'binary' in v.lower(): + tdSql.checkData(0, 0, f'{self.binary_str}{self.rowNum}') + # nchar + elif 'nchar' in v.lower(): + tdSql.checkData(0, 0, f'{self.nchar_str}{self.rowNum}') + tdSql.execute(f'drop database {dbname}') + + def run(self): + self.last_check_stb_tb_base() + self.last_check_ntb_base() + self.last_check_stb_distribute() - tdSql.error("select col1 from stb where last(col9)='涛思数据10'") - tdSql.error("select col1 from ntb where last(col9)='涛思数据10'") - tdSql.error("select col1 from stb_1 where last(col9)='涛思数据10'") def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) + tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py index 5342c7d449..46426a5a95 100644 --- a/tests/system-test/2-query/max.py +++ b/tests/system-test/2-query/max.py @@ -5,198 +5,213 @@ import numpy as np class TDTestCase: + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, 
"dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, + "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143, + "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor()) self.rowNum = 10 self.ts = 1537146000000 - - def prepare_data(self): - - pass - def run(self): + self.binary_str = 'taosdata' + self.nchar_str = '涛思数据' + def max_check_stb_and_tb_base(self): tdSql.prepare() - intData = [] floatData = [] - - tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') tdSql.execute("create table stb_1 using stb tags('beijing')") - tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, - col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''') for i in range(self.rowNum): - tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) intData.append(i + 1) floatData.append(i + 0.1) - for i in range(self.rowNum): - tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" - % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) - intData.append(i + 1) - floatData.append(i + 0.1) + for i in ['ts','col11','col12','col13']: + for j in ['db.stb','stb','db.stb_1','stb_1']: + tdSql.error(f'select max({i} from {j} )') - # max verifacation - tdSql.error("select max(ts) from stb_1") - tdSql.error("select max(ts) from db.stb_1") - tdSql.error("select max(col7) from stb_1") - tdSql.error("select max(col7) from db.stb_1") - tdSql.error("select max(col8) from stb_1") - tdSql.error("select max(col8) from db.stb_1") - tdSql.error("select max(col9) from stb_1") - tdSql.error("select max(col9) from db.stb_1") - - tdSql.query("select max(col1) from stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col1) from db.stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col2) from stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col2) from db.stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col3) from stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col3) from db.stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col4) from stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col4) from 
db.stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col11) from stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col11) from db.stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col12) from stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col12) from db.stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col13) from stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col13) from db.stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col14) from stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col14) from db.stb_1") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col5) from stb_1") - tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col5) from db.stb_1") - tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col6) from stb_1") - tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col6) from db.stb_1") - tdSql.checkData(0, 0, np.max(floatData)) + for i in range(1,11): + for j in ['db.stb','stb','db.stb_1','stb_1']: + tdSql.query(f"select max(col{i}) from {j}") + if i<9: + tdSql.checkData(0, 0, np.max(intData)) + elif i>=9: + tdSql.checkData(0, 0, np.max(floatData)) tdSql.query("select max(col1) from stb_1 where col2<=5") tdSql.checkData(0,0,5) - - - - tdSql.error("select max(ts) from stb") - tdSql.error("select max(ts) from db.stb") - tdSql.error("select max(col7) from stb") - tdSql.error("select max(col7) from db.stb") - tdSql.error("select max(col8) from stb") - tdSql.error("select max(col8) from db.stb") - tdSql.error("select max(col9) from stb") - tdSql.error("select max(col9) from db.stb") - - tdSql.query("select max(col1) from stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col1) from db.stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col2) from stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col2) from db.stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col3) from stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col3) from db.stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col4) from stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col4) from db.stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col11) from stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col11) from db.stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col12) from stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col12) from db.stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col13) from stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col13) from db.stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col14) from stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col14) from db.stb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col5) from stb") - tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col5) from db.stb") - tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col6) from stb") - tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col6) from db.stb") - tdSql.checkData(0, 0, np.max(floatData)) tdSql.query("select max(col1) from stb where 
col2<=5") tdSql.checkData(0,0,5) - - - - tdSql.error("select max(ts) from ntb") - tdSql.error("select max(ts) from db.ntb") - tdSql.error("select max(col7) from ntb") - tdSql.error("select max(col7) from db.ntb") - tdSql.error("select max(col8) from ntb") - tdSql.error("select max(col8) from db.ntb") - tdSql.error("select max(col9) from ntb") - tdSql.error("select max(col9) from db.ntb") - - tdSql.query("select max(col1) from ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col1) from db.ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col2) from ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col2) from db.ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col3) from ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col3) from db.ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col4) from ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col4) from db.ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col11) from ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col11) from db.ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col12) from ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col12) from db.ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col13) from ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col13) from db.ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col14) from ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col14) from db.ntb") - tdSql.checkData(0, 0, np.max(intData)) - tdSql.query("select max(col5) from ntb") - tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col5) from db.ntb") - tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col6) from ntb") - tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col6) from db.ntb") - tdSql.checkData(0, 0, np.max(floatData)) - tdSql.query("select max(col1) from stb_1 where col2<=5") + tdSql.execute('drop database db') + + def max_check_ntb_base(self): + tdSql.prepare() + intData = [] + floatData = [] + tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''') + for i in range(self.rowNum): + tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + intData.append(i + 1) + floatData.append(i + 0.1) + for i in ['ts','col11','col12','col13']: + for j in ['db.ntb','ntb']: + tdSql.error(f'select max({i} from {j} )') + for i in range(1,11): + for j in ['db.ntb','ntb']: + tdSql.query(f"select max(col{i}) from {j}") + if i<9: + tdSql.checkData(0, 0, np.max(intData)) + elif i>=9: + tdSql.checkData(0, 0, np.max(floatData)) + tdSql.query("select max(col1) from ntb where col2<=5") tdSql.checkData(0,0,5) + tdSql.execute('drop database db') + + + def check_max_functions(self, tbname , col_name): + + max_sql = f"select max({col_name}) from {tbname};" + + same_sql = f"select {col_name} from {tbname} order by {col_name} desc limit 1" + + 
tdSql.query(max_sql) + max_result = tdSql.queryResult + + tdSql.query(same_sql) + same_result = tdSql.queryResult + + if max_result !=same_result: + tdLog.exit(" max function work not as expected, sql : %s "% max_sql) + else: + tdLog.info(" max function work as expected, sql : %s "% max_sql) + + + def support_distributed_aggregate(self): + + # prepate datas for 20 tables distributed at different vgroups + tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") + tdSql.execute(" use testdb ") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) + ''' + ) + + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(20): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + + for i in range(9): + tdSql.execute( + f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + tdSql.execute( + f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + ) + + for i in range(1,21): + if i ==1 or i == 4: + continue + else: + tbname = "ct"+f'{i}' + for j in range(9): + tdSql.execute( + f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + ) + tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + + tdSql.execute( + f'''insert into t1 values + ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) + ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) + ( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a ) + ( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a ) + ( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a ) + ( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a ) + ( '2022-10-28 
01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
+            ( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
+            ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+            ( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+            '''
+        )
+
+        tdLog.info(" prepare data for distributed_aggregate done! ")
+
+        # get the vgroup ids of all vgroups
+        tdSql.query("show vgroups ")
+        vgroups = tdSql.queryResult
+
+        vnode_tables = {}
+
+        for vgroup_id in vgroups:
+            vnode_tables[vgroup_id[0]] = []
+
+        # check the child-table distribution per vnode: at least two vnodes must
+        # hold two or more child tables, otherwise the aggregate is not really
+        # exercised in distributed mode
+        tdSql.query("show tables like 'ct%'")
+        table_names = tdSql.queryResult
+        tablenames = []
+        for table_name in table_names:
+            vnode_tables[table_name[6]].append(table_name[0])
+
+        count = 0
+        for k, v in vnode_tables.items():
+            if len(v) >= 2:
+                count += 1
+        if count < 2:
+            tdLog.exit(" the child tables are not distributed across multiple vnodes ")
+
+        # check that the max function works on every numeric column of every child table
+        tdSql.query("show tables like 'ct%'")
+        table_names = tdSql.queryResult
+        tablenames = []
+        for table_name in table_names:
+            tablenames.append(table_name[0])
+
+        tdSql.query("desc stb1")
+        col_names = tdSql.queryResult
+
+        colnames = []
+        for col_name in col_names:
+            if col_name[1] in ["INT", "BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE"]:
+                colnames.append(col_name[0])
+
+        for tablename in tablenames:
+            for colname in colnames:
+                self.check_max_functions(tablename, colname)
+
+        # debug output: the table-to-vnode distribution that was checked
+        print(vnode_tables)
+
+    def run(self):
+
+        # max verification
+        self.max_check_stb_and_tb_base()
+        self.max_check_ntb_base()
+
+        self.support_distributed_aggregate()
+
     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/system-test/2-query/top.py b/tests/system-test/2-query/top.py
index 146bb34937..83f535856e 100644
--- a/tests/system-test/2-query/top.py
+++ b/tests/system-test/2-query/top.py
@@ -11,6 +11,9 @@

 # -*- coding: utf-8 -*-

+import random
+import string
+from util.common import *
 from util.log import *
 from util.cases import *
 from util.sql import *
@@ -22,82 +25,89 @@ class TDTestCase:
         tdSql.init(conn.cursor())
         self.rowNum = 10
+        self.tbnum = 20
         self.ts = 1537146000000
-
-    def run(self):
+        self.binary_str = 'taosdata'
+        self.nchar_str = '涛思数据'
+    def top_check_base(self):
         tdSql.prepare()
-
-
-
-        tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
-                    col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
-        tdSql.execute("create table test1 using test tags('beijing')")
+        tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
+                    col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
+        tdSql.execute("create table stb_1 using stb tags('beijing')")
         for i in range(self.rowNum):
-            tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
-                          % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
-
-
-        # top
verifacation - tdSql.error("select top(ts, 10) from test") - tdSql.error("select top(col1, 0) from test") - tdSql.error("select top(col1, 101) from test") - tdSql.error("select top(col2, 0) from test") - tdSql.error("select top(col2, 101) from test") - tdSql.error("select top(col3, 0) from test") - tdSql.error("select top(col3, 101) from test") - tdSql.error("select top(col4, 0) from test") - tdSql.error("select top(col4, 101) from test") - tdSql.error("select top(col5, 0) from test") - tdSql.error("select top(col5, 101) from test") - tdSql.error("select top(col6, 0) from test") - tdSql.error("select top(col6, 101) from test") - tdSql.error("select top(col7, 10) from test") - tdSql.error("select top(col8, 10) from test") - tdSql.error("select top(col9, 10) from test") - tdSql.error("select top(col11, 0) from test") - tdSql.error("select top(col11, 101) from test") - tdSql.error("select top(col12, 0) from test") - tdSql.error("select top(col12, 101) from test") - tdSql.error("select top(col13, 0) from test") - tdSql.error("select top(col13, 101) from test") - tdSql.error("select top(col14, 0) from test") - tdSql.error("select top(col14, 101) from test") - - tdSql.query("select top(col1, 2) from test") + tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + column_list = ['col1','col2','col3','col4','col5','col6','col7','col8'] + error_column_list = ['col11','col12','col13'] + error_param_list = [0,101] + for i in column_list: + tdSql.query(f'select top({i},2) from stb_1') + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) + for j in error_param_list: + tdSql.error(f'select top({i},{j}) from stb_1') + for i in error_column_list: + tdSql.error(f'select top({i},10) from stb_1') + tdSql.query("select ts,top(col1, 2),ts from stb_1 group by tbname") tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) - tdSql.query("select top(col2, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) - tdSql.query("select top(col3, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) - tdSql.query("select top(col4, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) - tdSql.query("select top(col11, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) - tdSql.query("select top(col12, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) - tdSql.query("select top(col13, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) - tdSql.query("select top(col14, 2) from test") - tdSql.checkRows(2) - tdSql.checkEqual(tdSql.queryResult,[(9,),(10,)]) - tdSql.query("select ts,top(col1, 2),ts from test1") - tdSql.checkRows(2) - tdSql.query("select top(col14, 100) from test") - tdSql.checkRows(10) - tdSql.query("select ts,top(col1, 2),ts from test group by tbname") - tdSql.checkRows(2) - tdSql.query('select top(col2,1) from test interval(1y) order by col2') + tdSql.query('select top(col2,1) from stb_1 interval(1y) order by col2') tdSql.checkData(0,0,10) + tdSql.error("select * from stb_1 where top(col2,1)=1") + tdSql.execute('drop database db') + def top_check_stb_distribute(self): + # prepare data for vgroup 4 + dbname = tdCom.getLongName(10, "letters") + stbname = tdCom.getLongName(5, "letters") 
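+        # tdCom.getLongName generates a random name of the requested length,
+        # so repeated runs use fresh database/stable names instead of colliding
+        # with leftovers from an earlier run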
+ tdSql.execute(f"create database if not exists {dbname} vgroups 2") + tdSql.execute(f'use {dbname}') + # build 20 child tables,every table insert 10 rows + tdSql.execute(f'''create table {stbname}(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned, + col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''') + for i in range(self.tbnum): + tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')") + tdSql.execute(f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i)) + column_list = ['col1','col2','col3','col4','col5','col6','col7','col8'] + for i in [f'{stbname}', f'{dbname}.{stbname}']: + for j in column_list: + tdSql.query(f"select top({j},1) from {i}") + tdSql.checkRows(0) + tdSql.query('show tables') + vgroup_list = [] + for i in range(len(tdSql.queryResult)): + vgroup_list.append(tdSql.queryResult[i][6]) + vgroup_list_set = set(vgroup_list) - tdSql.error("select * from test where bottom(col2,1)=1") - tdSql.error("select top(col14, 0) from test;") + for i in vgroup_list_set: + vgroups_num = vgroup_list.count(i) + if vgroups_num >=2: + tdLog.info(f'This scene with {vgroups_num} vgroups is ok!') + continue + else: + tdLog.exit(f'This scene does not meet the requirements with {vgroups_num} vgroup!\n') + for i in range(self.rowNum): + for j in range(self.tbnum): + tdSql.execute(f"insert into {stbname}_{j} values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')" + % (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1)) + + error_column_list = ['col11','col12','col13'] + error_param_list = [0,101] + for i in column_list: + tdSql.query(f'select top({i},2) from {stbname}') + tdSql.checkRows(2) + tdSql.checkEqual(tdSql.queryResult,[(10,),(10,)]) + for j in error_param_list: + tdSql.error(f'select top({i},{j}) from {stbname}') + for i in error_column_list: + tdSql.error(f'select top({i},10) from {stbname}') + + tdSql.query(f"select ts,top(col1, 2),ts from {stbname} group by tbname") + tdSql.checkRows(2*self.tbnum) + tdSql.query(f'select top(col2,1) from {stbname} interval(1y) order by col2') + tdSql.checkData(0,0,10) + tdSql.error(f"select * from {stbname} where top(col2,1)=1") + def run(self): + self.top_check_base() + self.top_check_stb_distribute() def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/system-test/6-cluster/5dnode3mnodeDrop.py b/tests/system-test/6-cluster/5dnode3mnodeDrop.py index f999a16b05..b98134f5e0 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeDrop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeDrop.py @@ -269,7 +269,8 @@ class TDTestCase: tdSql.query("show dnodes;") print(tdSql.queryResult) - # drop and follower of mnode + + # drop follower of mnode dropcount =0 while dropcount <= 10: for i in range(1,3): diff --git a/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py b/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py new file mode 100644 index 0000000000..cfa3920604 --- /dev/null +++ b/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py @@ -0,0 +1,428 @@ +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +import time +import socket +import 
subprocess +from multiprocessing import Process +import threading +import time +import inspect +import ctypes +class MyDnodes(TDDnodes): + def __init__(self ,dnodes_lists): + super(MyDnodes,self).__init__() + self.dnodes = dnodes_lists # dnode must be TDDnode instance + self.simDeployed = False + + +class TDTestCase: + + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + self.TDDnodes = None + self.ts = 1500000000000 + + def buildcluster(self,dnodenumber): + self.depoly_cluster(dnodenumber) + self.master_dnode = self.TDDnodes.dnodes[0] + self.host=self.master_dnode.cfgDict["fqdn"] + conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) + tdSql.init(conn1.cursor()) + + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def _async_raise(self, tid, exctype): + """raises the exception, performs cleanup if needed""" + if not inspect.isclass(exctype): + exctype = type(exctype) + res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype)) + if res == 0: + raise ValueError("invalid thread id") + elif res != 1: + # """if it returns a number greater than one, you're in trouble, + # and you should call it again with exc=NULL to revert the effect""" + ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) + raise SystemError("PyThreadState_SetAsyncExc failed") + + def stop_thread(self,thread): + self._async_raise(thread.ident, SystemExit) + + + def createDbTbale(self,dbcountStart,dbcountStop,stbname,chilCount): + # fisrt add data : db\stable\childtable\general table + + for couti in range(dbcountStart,dbcountStop): + tdLog.debug("drop database if exists db%d" %couti) + tdSql.execute("drop database if exists db%d" %couti) + print("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("use db%d" %couti) + tdSql.execute( + '''create table %s + (ts timestamp, c1 int, c2 bigint,c3 binary(16), c4 timestamp) + tags (t1 int) + '''%stbname + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(chilCount): + tdSql.execute(f'create table {stbname}_{i+1} using {stbname} tags ( {i+1} )') + + def insertTabaleData(self,dbcountStart,dbcountStop,stbname,chilCount,ts_start,rowCount): + # insert data : create childtable and data + + for couti in range(dbcountStart,dbcountStop): + tdSql.execute("use db%d" %couti) + pre_insert = "insert into " + sql = pre_insert + chilCount=int(chilCount) + allRows=chilCount*rowCount + tdLog.debug("doing insert data into stable-index:%s rows:%d ..."%(stbname, allRows)) + exeStartTime=time.time() + for i in range(0,chilCount): + sql += " %s_%d values "%(stbname,i) + for j in range(rowCount): + sql += "(%d, %d, %d,'taos_%d',%d) "%(ts_start + j*1000, j, j, j, ts_start + j*1000) + if j >0 and j%4000 == 0: + # print(sql) + tdSql.execute(sql) + sql = "insert into %s_%d values " %(stbname,i) + # end sql + if sql != pre_insert: + # print(sql) 
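+                # flush the tail of the buffer: the periodic execute above only
+                # fires on every 4000th row, so the remaining rows are written here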
+ print(len(sql)) + tdSql.execute(sql) + exeEndTime=time.time() + spendTime=exeEndTime-exeStartTime + speedInsert=allRows/spendTime + tdLog.debug("spent %.2fs to INSERT %d rows into %s , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,stbname,speedInsert)) + + def checkData(self,dbname,stbname,stableCount,CtableCount,rowsPerSTable,): + tdSql.execute("use %s"%dbname) + tdSql.query("show stables") + tdSql.checkRows(stableCount) + tdSql.query("show tables") + tdSql.checkRows(CtableCount) + for i in range(stableCount): + tdSql.query("select count(*) from %s%d"%(stbname,i)) + tdSql.checkData(0,0,rowsPerSTable) + return + + + def depoly_cluster(self ,dnodes_nums): + + testCluster = False + valgrind = 0 + hostname = socket.gethostname() + dnodes = [] + start_port = 6030 + start_port_sec = 6130 + for num in range(1, dnodes_nums+1): + dnode = TDDnode(num) + dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}") + dnode.addExtraCfg("fqdn", f"{hostname}") + dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}") + dnode.addExtraCfg("monitorFqdn", hostname) + dnode.addExtraCfg("monitorPort", 7043) + dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}") + dnodes.append(dnode) + + self.TDDnodes = MyDnodes(dnodes) + self.TDDnodes.init("") + self.TDDnodes.setTestCluster(testCluster) + self.TDDnodes.setValgrind(valgrind) + self.TDDnodes.stopAll() + for dnode in self.TDDnodes.dnodes: + self.TDDnodes.deploy(dnode.index,{}) + + for dnode in self.TDDnodes.dnodes: + self.TDDnodes.starttaosd(dnode.index) + + # create cluster + for dnode in self.TDDnodes.dnodes[1:]: + # print(dnode.cfgDict) + dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] + dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0] + dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1] + cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;" + print(cmd) + os.system(cmd) + + time.sleep(2) + tdLog.info(" create cluster with %d dnode done! 
" %dnodes_nums) + + def checkdnodes(self,dnodenumber): + count=0 + while count < 10: + time.sleep(1) + statusReadyBumber=0 + tdSql.query("show dnodes;") + if tdSql.checkRows(dnodenumber) : + print("dnode is %d nodes"%dnodenumber) + for i in range(dnodenumber): + if tdSql.queryResult[i][4] !='ready' : + status=tdSql.queryResult[i][4] + print("dnode:%d status is %s "%(i,status)) + break + else: + statusReadyBumber+=1 + print(statusReadyBumber) + if statusReadyBumber == dnodenumber : + print("all of %d mnodes is ready in 10s "%dnodenumber) + return True + break + count+=1 + else: + print("%d mnodes is not ready in 10s "%dnodenumber) + return False + + + def check3mnode(self): + count=0 + while count < 10: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(3) : + print("mnode is three nodes") + if tdSql.queryResult[0][2]=='leader' : + if tdSql.queryResult[1][2]=='follower': + if tdSql.queryResult[2][2]=='follower': + print("three mnodes is ready in 10s") + break + elif tdSql.queryResult[0][2]=='follower' : + if tdSql.queryResult[1][2]=='leader': + if tdSql.queryResult[2][2]=='follower': + print("three mnodes is ready in 10s") + break + elif tdSql.queryResult[0][2]=='follower' : + if tdSql.queryResult[1][2]=='follower': + if tdSql.queryResult[2][2]=='leader': + print("three mnodes is ready in 10s") + break + count+=1 + else: + print("three mnodes is not ready in 10s ") + return -1 + + tdSql.query("show mnodes;") + tdSql.checkRows(3) + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,3,'ready') + tdSql.checkData(1,1,'%s:6130'%self.host) + tdSql.checkData(1,3,'ready') + tdSql.checkData(2,1,'%s:6230'%self.host) + tdSql.checkData(2,3,'ready') + + def check3mnode1off(self): + count=0 + while count < 10: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(3) : + print("mnode is three nodes") + if tdSql.queryResult[0][2]=='offline' : + if tdSql.queryResult[1][2]=='leader': + if tdSql.queryResult[2][2]=='follower': + print("stop mnodes on dnode 2 successfully in 10s") + break + elif tdSql.queryResult[1][2]=='follower': + if tdSql.queryResult[2][2]=='leader': + print("stop mnodes on dnode 2 successfully in 10s") + break + count+=1 + else: + print("stop mnodes on dnode 2 failed in 10s ") + return -1 + tdSql.error("drop mnode on dnode 1;") + + tdSql.query("show mnodes;") + tdSql.checkRows(3) + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,2,'offline') + tdSql.checkData(0,3,'ready') + tdSql.checkData(1,1,'%s:6130'%self.host) + tdSql.checkData(1,3,'ready') + tdSql.checkData(2,1,'%s:6230'%self.host) + tdSql.checkData(2,3,'ready') + + def check3mnode2off(self): + count=0 + while count < 40: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(3) : + print("mnode is three nodes") + if tdSql.queryResult[0][2]=='leader' : + if tdSql.queryResult[1][2]=='offline': + if tdSql.queryResult[2][2]=='follower': + print("stop mnodes on dnode 2 successfully in 10s") + break + count+=1 + else: + print("stop mnodes on dnode 2 failed in 10s ") + return -1 + tdSql.error("drop mnode on dnode 2;") + + tdSql.query("show mnodes;") + tdSql.checkRows(3) + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,2,'leader') + tdSql.checkData(0,3,'ready') + tdSql.checkData(1,1,'%s:6130'%self.host) + tdSql.checkData(1,2,'offline') + tdSql.checkData(1,3,'ready') + tdSql.checkData(2,1,'%s:6230'%self.host) + tdSql.checkData(2,2,'follower') + tdSql.checkData(2,3,'ready') + + def check3mnode3off(self): + count=0 + while count < 10: + time.sleep(1) + 
tdSql.query("show mnodes;") + if tdSql.checkRows(3) : + print("mnode is three nodes") + if tdSql.queryResult[0][2]=='leader' : + if tdSql.queryResult[2][2]=='offline': + if tdSql.queryResult[1][2]=='follower': + print("stop mnodes on dnode 3 successfully in 10s") + break + count+=1 + else: + print("stop mnodes on dnode 3 failed in 10s") + return -1 + tdSql.error("drop mnode on dnode 3;") + tdSql.query("show mnodes;") + tdSql.checkRows(3) + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,2,'leader') + tdSql.checkData(0,3,'ready') + tdSql.checkData(1,1,'%s:6130'%self.host) + tdSql.checkData(1,2,'follower') + tdSql.checkData(1,3,'ready') + tdSql.checkData(2,1,'%s:6230'%self.host) + tdSql.checkData(2,2,'offline') + tdSql.checkData(2,3,'ready') + + def five_dnode_three_mnode(self,dnodenumber): + # testcase parameters + vgroups=1 + dbcountStart=0 + dbcountStop=1 + dbname="db" + stbname="stb" + tablesPerStb=1000 + rowsPerTable=100 + startTs=1640966400000 # 2022-01-01 00:00:00.000 + + tdSql.query("show dnodes;") + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(4,1,'%s:6430'%self.host) + tdSql.checkData(0,4,'ready') + tdSql.checkData(4,4,'ready') + tdSql.query("show mnodes;") + tdSql.checkRows(1) + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,2,'leader') + tdSql.checkData(0,3,'ready') + + # fisr add three mnodes; + tdSql.execute("create mnode on dnode 2") + tdSql.execute("create mnode on dnode 3") + + # fisrt check statut ready + self.check3mnode() + + tdSql.error("create mnode on dnode 2") + tdSql.query("show dnodes;") + print(tdSql.queryResult) + tdLog.debug("stop all of mnode ") + + # drop follower of mnode and insert data + self.createDbTbale(dbcountStart, dbcountStop,stbname,tablesPerStb) + #(method) insertTabaleData: (dbcountStart: Any, dbcountStop: Any, stbname: Any, chilCount: Any, ts_start: Any, rowCount: Any) -> None + threads=threading.Thread(target=self.insertTabaleData, args=( + dbcountStart, + dbcountStop, + stbname, + tablesPerStb, + startTs, + rowsPerTable)) + + threads.start() + dropcount =0 + while dropcount <= 10: + for i in range(1,3): + tdLog.debug("drop mnode on dnode %d"%(i+1)) + tdSql.execute("drop mnode on dnode %d"%(i+1)) + tdSql.query("show mnodes;") + count=0 + while count<10: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(2): + print("drop mnode %d successfully"%(i+1)) + break + count+=1 + tdLog.debug("create mnode on dnode %d"%(i+1)) + tdSql.execute("create mnode on dnode %d"%(i+1)) + count=0 + while count<10: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(3): + print("drop mnode %d successfully"%(i+1)) + break + count+=1 + dropcount+=1 + threads.join() + self.check3mnode() + + + + def getConnection(self, dnode): + host = dnode.cfgDict["fqdn"] + port = dnode.cfgDict["serverPort"] + config_dir = dnode.cfgDir + return taos.connect(host=host, port=int(port), config=config_dir) + + + def run(self): + # print(self.master_dnode.cfgDict) + self.buildcluster(5) + self.five_dnode_three_mnode(5) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py b/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py new file mode 100644 index 0000000000..1739db09af --- /dev/null +++ b/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py @@ -0,0 +1,377 @@ 
+from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +import time +import socket +import subprocess +from multiprocessing import Process +import threading +import time +import inspect +import ctypes +class MyDnodes(TDDnodes): + def __init__(self ,dnodes_lists): + super(MyDnodes,self).__init__() + self.dnodes = dnodes_lists # dnode must be TDDnode instance + self.simDeployed = False + + +class TDTestCase: + + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + self.TDDnodes = None + + def buildcluster(self,dnodenumber): + self.depoly_cluster(dnodenumber) + self.master_dnode = self.TDDnodes.dnodes[0] + self.host=self.master_dnode.cfgDict["fqdn"] + conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) + tdSql.init(conn1.cursor()) + + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def _async_raise(self, tid, exctype): + """raises the exception, performs cleanup if needed""" + if not inspect.isclass(exctype): + exctype = type(exctype) + res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype)) + if res == 0: + raise ValueError("invalid thread id") + elif res != 1: + # """if it returns a number greater than one, you're in trouble, + # and you should call it again with exc=NULL to revert the effect""" + ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) + raise SystemError("PyThreadState_SetAsyncExc failed") + + def stop_thread(self,thread): + self._async_raise(thread.ident, SystemExit) + + + def insert_data(self,countstart,countstop): + # fisrt add data : db\stable\childtable\general table + + for couti in range(countstart,countstop): + tdLog.debug("drop database if exists db%d" %couti) + tdSql.execute("drop database if exists db%d" %couti) + print("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti) + tdSql.execute("use db%d" %couti) + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def checkData(self,dbname,stbname,stableCount,CtableCount,rowsPerSTable,): + tdSql.execute("use %s"%dbname) + tdSql.query("show stables") + tdSql.checkRows(stableCount) + tdSql.query("show tables") + tdSql.checkRows(CtableCount) + for i in range(stableCount): + tdSql.query("select count(*) from %s%d"%(stbname,i)) + tdSql.checkData(0,0,rowsPerSTable) + return + + def depoly_cluster(self ,dnodes_nums=5,independent=True): + + testCluster = False + valgrind = 0 + hostname = socket.gethostname() + dnodes = 
[] + start_port = 6030 + start_port_sec = 6130 + for num in range(1, dnodes_nums+1): + dnode = TDDnode(num) + dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}") + dnode.addExtraCfg("fqdn", f"{hostname}") + dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}") + dnode.addExtraCfg("monitorFqdn", hostname) + dnode.addExtraCfg("monitorPort", 7043) + dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}") + # configure three dnoe don't support vnodes + if independent and (num < 4): + dnode.addExtraCfg("supportVnodes", 0) + + dnodes.append(dnode) + + self.TDDnodes = MyDnodes(dnodes) + self.TDDnodes.init("") + self.TDDnodes.setTestCluster(testCluster) + self.TDDnodes.setValgrind(valgrind) + self.TDDnodes.stopAll() + for dnode in self.TDDnodes.dnodes: + self.TDDnodes.deploy(dnode.index,{}) + + for dnode in self.TDDnodes.dnodes: + self.TDDnodes.starttaosd(dnode.index) + + # create cluster + for dnode in self.TDDnodes.dnodes[1:]: + # print(dnode.cfgDict) + dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] + dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0] + dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1] + cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;" + print(cmd) + os.system(cmd) + + time.sleep(2) + tdLog.info(" create cluster with %d dnode done! " %dnodes_nums) + + def checkdnodes(self,dnodenumber): + count=0 + while count < 100: + time.sleep(1) + statusReadyBumber=0 + tdSql.query("show dnodes;") + if tdSql.checkRows(dnodenumber) : + print("dnode is %d nodes"%dnodenumber) + for i in range(dnodenumber): + if tdSql.queryResult[i][4] !='ready' : + status=tdSql.queryResult[i][4] + print("dnode:%d status is %s "%(i,status)) + break + else: + statusReadyBumber+=1 + print(statusReadyBumber) + if statusReadyBumber == dnodenumber : + print("all of %d mnodes is ready in 10s "%dnodenumber) + return True + break + count+=1 + else: + print("%d mnodes is not ready in 10s "%dnodenumber) + return False + + + def check3mnode(self): + count=0 + while count < 10: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(3) : + print("mnode is three nodes") + if tdSql.queryResult[0][2]=='leader' : + if tdSql.queryResult[1][2]=='follower': + if tdSql.queryResult[2][2]=='follower': + print("three mnodes is ready in 10s") + break + elif tdSql.queryResult[0][2]=='follower' : + if tdSql.queryResult[1][2]=='leader': + if tdSql.queryResult[2][2]=='follower': + print("three mnodes is ready in 10s") + break + elif tdSql.queryResult[0][2]=='follower' : + if tdSql.queryResult[1][2]=='follower': + if tdSql.queryResult[2][2]=='leader': + print("three mnodes is ready in 10s") + break + count+=1 + else: + print("three mnodes is not ready in 10s ") + return -1 + + tdSql.query("show mnodes;") + tdSql.checkRows(3) + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,3,'ready') + tdSql.checkData(1,1,'%s:6130'%self.host) + tdSql.checkData(1,3,'ready') + tdSql.checkData(2,1,'%s:6230'%self.host) + tdSql.checkData(2,3,'ready') + + def check3mnode1off(self): + count=0 + while count < 10: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(3) : + print("mnode is three nodes") + if tdSql.queryResult[0][2]=='offline' : + if tdSql.queryResult[1][2]=='leader': + if tdSql.queryResult[2][2]=='follower': + print("stop mnodes on dnode 2 successfully in 10s") + break + elif tdSql.queryResult[1][2]=='follower': + if tdSql.queryResult[2][2]=='leader': + print("stop mnodes on dnode 2 successfully in 10s") + 
break + count+=1 + else: + print("stop mnodes on dnode 2 failed in 10s ") + return -1 + tdSql.error("drop mnode on dnode 1;") + + tdSql.query("show mnodes;") + tdSql.checkRows(3) + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,2,'offline') + tdSql.checkData(0,3,'ready') + tdSql.checkData(1,1,'%s:6130'%self.host) + tdSql.checkData(1,3,'ready') + tdSql.checkData(2,1,'%s:6230'%self.host) + tdSql.checkData(2,3,'ready') + + def check3mnode2off(self): + count=0 + while count < 40: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(3) : + print("mnode is three nodes") + if tdSql.queryResult[0][2]=='leader' : + if tdSql.queryResult[1][2]=='offline': + if tdSql.queryResult[2][2]=='follower': + print("stop mnodes on dnode 2 successfully in 10s") + break + count+=1 + else: + print("stop mnodes on dnode 2 failed in 10s ") + return -1 + tdSql.error("drop mnode on dnode 2;") + + tdSql.query("show mnodes;") + tdSql.checkRows(3) + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,2,'leader') + tdSql.checkData(0,3,'ready') + tdSql.checkData(1,1,'%s:6130'%self.host) + tdSql.checkData(1,2,'offline') + tdSql.checkData(1,3,'ready') + tdSql.checkData(2,1,'%s:6230'%self.host) + tdSql.checkData(2,2,'follower') + tdSql.checkData(2,3,'ready') + + def check3mnode3off(self): + count=0 + while count < 10: + time.sleep(1) + tdSql.query("show mnodes;") + if tdSql.checkRows(3) : + print("mnode is three nodes") + if tdSql.queryResult[0][2]=='leader' : + if tdSql.queryResult[2][2]=='offline': + if tdSql.queryResult[1][2]=='follower': + print("stop mnodes on dnode 3 successfully in 10s") + break + count+=1 + else: + print("stop mnodes on dnode 3 failed in 10s") + return -1 + tdSql.error("drop mnode on dnode 3;") + tdSql.query("show mnodes;") + tdSql.checkRows(3) + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,2,'leader') + tdSql.checkData(0,3,'ready') + tdSql.checkData(1,1,'%s:6130'%self.host) + tdSql.checkData(1,2,'follower') + tdSql.checkData(1,3,'ready') + tdSql.checkData(2,1,'%s:6230'%self.host) + tdSql.checkData(2,2,'offline') + tdSql.checkData(2,3,'ready') + + def five_dnode_three_mnode(self,dnodenumber): + tdSql.query("show dnodes;") + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(4,1,'%s:6430'%self.host) + tdSql.checkData(0,4,'ready') + tdSql.checkData(4,4,'ready') + tdSql.query("show mnodes;") + tdSql.checkRows(1) + tdSql.checkData(0,1,'%s:6030'%self.host) + tdSql.checkData(0,2,'leader') + tdSql.checkData(0,3,'ready') + + # fisr add three mnodes; + tdSql.execute("create mnode on dnode 2") + tdSql.execute("create mnode on dnode 3") + + # fisrt check statut ready + self.check3mnode() + + tdSql.error("create mnode on dnode 2") + tdSql.query("show dnodes;") + print(tdSql.queryResult) + tdLog.debug("stop all of mnode ") + + # seperate vnode and mnode in different dnodes. 
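+        # The loop below is a rolling restart under load: insert_data() keeps
+        # writing from a background thread while each dnode is stopped and
+        # restarted in turn, and checkdnodes(5) must report every dnode ready
+        # again after each restart.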
+ # create database and stable + stopcount =0 + while stopcount < 2: + for i in range(dnodenumber): + # threads=[] + # threads = MyThreadFunc(self.insert_data(i*2,i*2+2)) + threads=threading.Thread(target=self.insert_data, args=(i,i+1)) + threads.start() + self.TDDnodes.stoptaosd(i+1) + self.TDDnodes.starttaosd(i+1) + + if self.checkdnodes(5): + print("123") + threads.join() + else: + print("456") + self.stop_thread(threads) + assert 1 == 2 ,"some dnode started failed" + return False + # self.check3mnode() + self.check3mnode() + + + stopcount+=1 + self.check3mnode() + + + def getConnection(self, dnode): + host = dnode.cfgDict["fqdn"] + port = dnode.cfgDict["serverPort"] + config_dir = dnode.cfgDir + return taos.connect(host=host, port=int(port), config=config_dir) + + + def run(self): + # print(self.master_dnode.cfgDict) + self.buildcluster(5) + self.five_dnode_three_mnode(5) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/7-tmq/basic5.py b/tests/system-test/7-tmq/basic5.py index e44f327995..4ed3be967e 100644 --- a/tests/system-test/7-tmq/basic5.py +++ b/tests/system-test/7-tmq/basic5.py @@ -192,7 +192,7 @@ class TDTestCase: time.sleep(1) tdLog.info("start consume processor") - pollDelay = 100 + pollDelay = 20 showMsg = 1 showRow = 1 @@ -208,7 +208,7 @@ class TDTestCase: os.system(shellCmd) # wait for data ready - prepareEnvThread.join() + # prepareEnvThread.join() tdLog.info("insert process end, and start to check consume result") while 1: diff --git a/tests/system-test/7-tmq/subscribeDb0.py b/tests/system-test/7-tmq/subscribeDb0.py index c9f256ed74..4e8fb04517 100644 --- a/tests/system-test/7-tmq/subscribeDb0.py +++ b/tests/system-test/7-tmq/subscribeDb0.py @@ -322,176 +322,6 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 5 end ...... 
") - def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db60', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 5000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db61', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 5000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db60' - topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 + ',' + topicName2 - ifcheckdata = 0 - ifManualCommit = 0 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - #consumerId = 1 - #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - tdSql.query("drop topic %s"%topicName2) - - tdLog.printNoPrefix("======== test case 6 end ...... 
") - - def tmqCase7(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db70', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 5000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - parameterDict2 = {'cfg': '', \ - 'dbName': 'db71', \ - 'vgroups': 4, \ - 'stbName': 'stb2', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 5000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) - - prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) - prepareEnvThread2.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db60' - topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) - tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName'])) - - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] - topicList = topicName1 + ',' + topicName2 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - consumerId = 1 - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - prepareEnvThread2.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 2 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - tdSql.query("drop topic %s"%topicName2) - - tdLog.printNoPrefix("======== test case 7 end ...... 
") - def run(self): tdSql.prepare() @@ -505,8 +335,6 @@ class TDTestCase: self.tmqCase4(cfgPath, buildPath) self.tmqCase5(cfgPath, buildPath) - self.tmqCase6(cfgPath, buildPath) - self.tmqCase7(cfgPath, buildPath) def stop(self): diff --git a/tests/system-test/7-tmq/subscribeDb1.py b/tests/system-test/7-tmq/subscribeDb1.py index ed92a429ae..28a341f8f3 100644 --- a/tests/system-test/7-tmq/subscribeDb1.py +++ b/tests/system-test/7-tmq/subscribeDb1.py @@ -72,10 +72,10 @@ class TDTestCase: if tdSql.getRows() == expectRows: break else: - time.sleep(5) - + time.sleep(5) + for i in range(expectRows): - tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) return resultList @@ -85,7 +85,7 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) @@ -97,7 +97,7 @@ class TDTestCase: tdLog.info(shellCmd) os.system(shellCmd) - def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) @@ -151,8 +151,7 @@ class TDTestCase: parameterDict["dbName"],\ parameterDict["vgroups"],\ parameterDict["stbName"],\ - parameterDict["ctbNum"],\ - parameterDict["rowsPerTbl"]) + parameterDict["ctbNum"]) self.insert_data(tsql,\ parameterDict["dbName"],\ @@ -163,16 +162,16 @@ class TDTestCase: parameterDict["startTs"]) return - def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: Produce while one consume to subscribe one db, inclue 1 stb") + def tmqCase6(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 6: Produce while one consumers to subscribe tow topic, Each contains one db") tdLog.info("step 1: create database, stb, ctb and insert data") # create and start thread parameterDict = {'cfg': '', \ - 'dbName': 'db8', \ + 'dbName': 'db60', \ 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -183,14 +182,32 @@ class TDTestCase: prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() - + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db61', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + 
prepareEnvThread2.start() + tdLog.info("create topics from db") - topicName1 = 'topic_db1' + topicName1 = 'topic_db60' + topicName2 = 'topic_db61' + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName'])) - tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2 - topicList = topicName1 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ',' + topicName2 ifcheckdata = 0 ifManualCommit = 0 keyList = 'group.id:cgrp1,\ @@ -199,6 +216,9 @@ class TDTestCase: auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + #consumerId = 1 + #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + event.wait() tdLog.info("start consume processor") @@ -208,7 +228,8 @@ class TDTestCase: self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) # wait for data ready - prepareEnvThread.join() + prepareEnvThread.join() + prepareEnvThread2.join() tdLog.info("insert process end, and start to check consume result") expectRows = 1 @@ -221,36 +242,21 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") - - tdLog.info("again start consume processer") - self.initConsumerTable() - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows != expectrowcnt: - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - tdSql.query("drop topic %s"%topicName1) + tdSql.query("drop topic %s"%topicName2) - tdLog.printNoPrefix("======== test case 8 end ...... ") + tdLog.printNoPrefix("======== test case 6 end ...... 
") - def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: Produce while one consume to subscribe one db, inclue 1 stb") + def tmqCase7(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 7: Produce while two consumers to subscribe tow topic, Each contains one db") tdLog.info("step 1: create database, stb, ctb and insert data") # create and start thread parameterDict = {'cfg': '', \ - 'dbName': 'db9', \ + 'dbName': 'db70', \ 'vgroups': 4, \ 'stbName': 'stb', \ 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ + 'rowsPerTbl': 5000, \ 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath @@ -261,14 +267,32 @@ class TDTestCase: prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() - + + parameterDict2 = {'cfg': '', \ + 'dbName': 'db71', \ + 'vgroups': 4, \ + 'stbName': 'stb2', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 5000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups'])) + + prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2) + prepareEnvThread2.start() + tdLog.info("create topics from db") - topicName1 = 'topic_db1' + topicName1 = 'topic_db60' + topicName2 = 'topic_db61' + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName'])) - tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2 - topicList = topicName1 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] + topicList = topicName1 + ',' + topicName2 ifcheckdata = 0 ifManualCommit = 1 keyList = 'group.id:cgrp1,\ @@ -277,86 +301,7 @@ class TDTestCase: auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - event.wait() - - tdLog.info("start consume processor") - pollDelay = 100 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - # wait for data ready - prepareEnvThread.join() - - tdLog.info("insert process end, and start to check consume result") - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - tdSql.query("select count(*) from %s.%s" %(parameterDict['dbName'], parameterDict['stbName'])) - countOfStb = tdSql.getData(0,0) - print ("====total rows of stb: %d"%countOfStb) - - tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - if totalConsumeRows < expectrowcnt: - tdLog.exit("tmq consume rows error!") - - tdLog.info("again start consume processer") - self.initConsumerTable() - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows2 = 0 - for i in range(expectRows): - totalConsumeRows2 += resultList[i] 
- - tdLog.info("firstly act consume rows: %d"%(totalConsumeRows)) - tdLog.info("secondly act consume rows: %d, expect consume rows: %d"%(totalConsumeRows2, expectrowcnt)) - if totalConsumeRows + totalConsumeRows2 != expectrowcnt: - tdLog.exit("tmq consume rows error!") - - tdSql.query("drop topic %s"%topicName1) - - tdLog.printNoPrefix("======== test case 9 end ...... ") - - def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: Produce while one consume to subscribe one db, inclue 1 stb") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db10', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db1' - - tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicName1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:false,\ - auto.commit.interval.ms:6000,\ - auto.offset.reset:earliest' + consumerId = 1 self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) event.wait() @@ -367,23 +312,12 @@ class TDTestCase: showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - time.sleep(2) - tdLog.info("pkill consume processor") - if (platform.system().lower() == 'windows'): - os.system("TASKKILL /F /IM tmq_sim.exe") - else: - os.system('pkill tmq_sim') - expectRows = 0 - resultList = self.selectConsumeResult(expectRows) - # wait for data ready prepareEnvThread.join() + prepareEnvThread2.join() + tdLog.info("insert process end, and start to check consume result") - - tdLog.info("again start consume processer") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - expectRows = 1 + expectRows = 2 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): @@ -393,85 +327,10 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") - time.sleep(15) tdSql.query("drop topic %s"%topicName1) + tdSql.query("drop topic %s"%topicName2) - tdLog.printNoPrefix("======== test case 10 end ...... 
") - - def tmqCase11(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 11: Produce while one consume to subscribe one db, inclue 1 stb") - tdLog.info("step 1: create database, stb, ctb and insert data") - # create and start thread - parameterDict = {'cfg': '', \ - 'dbName': 'db11', \ - 'vgroups': 4, \ - 'stbName': 'stb', \ - 'ctbNum': 10, \ - 'rowsPerTbl': 10000, \ - 'batchNum': 100, \ - 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 - parameterDict['cfg'] = cfgPath - - self.initConsumerTable() - - tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) - - prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) - prepareEnvThread.start() - - tdLog.info("create topics from db") - topicName1 = 'topic_db1' - - tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) - consumerId = 0 - expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] - topicList = topicName1 - ifcheckdata = 0 - ifManualCommit = 1 - keyList = 'group.id:cgrp1,\ - enable.auto.commit:true,\ - auto.commit.interval.ms:1000,\ - auto.offset.reset:earliest' - self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - - event.wait() - - tdLog.info("start consume processor") - pollDelay = 20 - showMsg = 1 - showRow = 1 - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - time.sleep(3) - tdLog.info("pkill consume processor") - if (platform.system().lower() == 'windows'): - os.system("TASKKILL /F /IM tmq_sim.exe") - else: - os.system('pkill tmq_sim') - expectRows = 0 - resultList = self.selectConsumeResult(expectRows) - - # wait for data ready - prepareEnvThread.join() - tdLog.info("insert process end, and start to check consume result") - - tdLog.info("again start consume processer") - self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - - expectRows = 1 - resultList = self.selectConsumeResult(expectRows) - totalConsumeRows = 0 - for i in range(expectRows): - totalConsumeRows += resultList[i] - - if totalConsumeRows >= expectrowcnt or totalConsumeRows <= 0: - tdLog.info("act consume rows: %d, expect consume rows between %d and 0"%(totalConsumeRows, expectrowcnt)) - tdLog.exit("tmq consume rows error!") - - time.sleep(15) - tdSql.query("drop topic %s"%topicName1) - - tdLog.printNoPrefix("======== test case 11 end ...... ") + tdLog.printNoPrefix("======== test case 7 end ...... 
") def run(self): tdSql.prepare() @@ -484,10 +343,9 @@ class TDTestCase: cfgPath = buildPath + "/../sim/psim/cfg" tdLog.info("cfgPath: %s" % cfgPath) - self.tmqCase8(cfgPath, buildPath) - self.tmqCase9(cfgPath, buildPath) - self.tmqCase10(cfgPath, buildPath) - self.tmqCase11(cfgPath, buildPath) + self.tmqCase6(cfgPath, buildPath) + self.tmqCase7(cfgPath, buildPath) + def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeDb2.py b/tests/system-test/7-tmq/subscribeDb2.py new file mode 100644 index 0000000000..af31e802b3 --- /dev/null +++ b/tests/system-test/7-tmq/subscribeDb2.py @@ -0,0 +1,347 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, 
consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + if (platform.system().lower() == 'windows'): + shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" + else: + shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("use %s" %dbName) + tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + event.set() + tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + t = time.time() + startTs = int(round(t * 1000)) + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + print ("input parameters:") + print (parameterDict) + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + self.create_tables(tsql,\ + parameterDict["dbName"],\ + parameterDict["vgroups"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"]) + + self.insert_data(tsql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"],\ + parameterDict["startTs"]) + return + + def tmqCase8(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 8: Produce while one consume to subscribe one db, inclue 1 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db8', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2 + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 0 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + + tdLog.info("again start consume processer") + self.initConsumerTable() + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 8 end ...... 
") + + def tmqCase9(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 9: Produce while one consume to subscribe one db, inclue 1 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db9', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2 + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + # wait for data ready + prepareEnvThread.join() + + tdLog.info("insert process end, and start to check consume result") + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + tdSql.query("select count(*) from %s.%s" %(parameterDict['dbName'], parameterDict['stbName'])) + countOfStb = tdSql.getData(0,0) + print ("====total rows of stb: %d"%countOfStb) + + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + if totalConsumeRows < expectrowcnt: + tdLog.exit("tmq consume rows error!") + + tdLog.info("again start consume processer") + self.initConsumerTable() + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows2 = 0 + for i in range(expectRows): + totalConsumeRows2 += resultList[i] + + tdLog.info("firstly act consume rows: %d"%(totalConsumeRows)) + tdLog.info("secondly act consume rows: %d, expect consume rows: %d"%(totalConsumeRows2, expectrowcnt)) + if totalConsumeRows + totalConsumeRows2 != expectrowcnt: + tdLog.exit("tmq consume rows error!") + + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 9 end ...... 
") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase8(cfgPath, buildPath) + self.tmqCase9(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/7-tmq/subscribeDb3.py b/tests/system-test/7-tmq/subscribeDb3.py new file mode 100644 index 0000000000..6973f4c51f --- /dev/null +++ b/tests/system-test/7-tmq/subscribeDb3.py @@ -0,0 +1,337 @@ + +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + #rpcDebugFlagVal = '143' + #clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + #updatecfgDict["rpcDebugFlag"] = rpcDebugFlagVal + #print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + #tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def newcur(self,cfg,host,port): + user = "root" + password = "taosdata" + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def initConsumerTable(self,cdbName='cdb'): + tdLog.info("create consume database, and consume info table, and consume result table") + tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) + tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) + tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) + + tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) + tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) + + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + sql = "insert into %s.consumeinfo values "%cdbName + sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) + tdLog.info("consume info sql: %s"%sql) + tdSql.query(sql) + + def selectConsumeResult(self,expectRows,cdbName='cdb'): + resultList=[] + while 1: + tdSql.query("select * from %s.consumeresult"%cdbName) + #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3)) + if 
tdSql.getRows() == expectRows: + break + else: + time.sleep(5) + + for i in range(expectRows): + tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) + resultList.append(tdSql.getData(i , 3)) + + return resultList + + def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): + if valgrind == 1: + logFile = cfgPath + '/../log/valgrind-tmq.log' + shellCmd = 'nohup valgrind --log-file=' + logFile + shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' + + if (platform.system().lower() == 'windows'): + shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" + else: + shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> /dev/null 2>&1 &" + tdLog.info(shellCmd) + os.system(shellCmd) + + def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("use %s" %dbName) + tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) + pre_create = "create table" + sql = pre_create + #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + for i in range(ctbNum): + sql += " %s_%d using %s tags(%d)"%(stbName,i,stbName,i+1) + if (i > 0) and (i%100 == 0): + tsql.execute(sql) + sql = pre_create + if sql != pre_create: + tsql.execute(sql) + + event.set() + tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) + return + + def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + t = time.time() + startTs = int(round(t * 1000)) + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) + for i in range(ctbNum): + sql += " %s_%d values "%(stbName,i) + for j in range(rowsPerTbl): + sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j) + if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + if j < rowsPerTbl - 1: + sql = "insert into %s_%d values " %(stbName,i) + else: + sql = "insert into " + #end sql + if sql != pre_insert: + #print("insert sql:%s"%sql) + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareEnv(self, **parameterDict): + print ("input parameters:") + print (parameterDict) + # create new connector for my thread + tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) + self.create_tables(tsql,\ + parameterDict["dbName"],\ + parameterDict["vgroups"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"]) + + self.insert_data(tsql,\ + parameterDict["dbName"],\ + parameterDict["stbName"],\ + parameterDict["ctbNum"],\ + parameterDict["rowsPerTbl"],\ + parameterDict["batchNum"],\ + parameterDict["startTs"]) + return + + def tmqCase10(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 10: Produce while one consume to subscribe one db, inclue 1 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db10', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:false,\ + auto.commit.interval.ms:6000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 100 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + time.sleep(2) + tdLog.info("pkill consume processor") + if (platform.system().lower() == 'windows'): + os.system("TASKKILL /F /IM tmq_sim.exe") + else: + os.system('pkill tmq_sim') + expectRows = 0 + resultList = self.selectConsumeResult(expectRows) + + # wait for data ready + prepareEnvThread.join() + tdLog.info("insert process end, and start to check consume result") + + tdLog.info("again start consume processer") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows != expectrowcnt: + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + time.sleep(15) + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 10 end ...... 
") + + def tmqCase11(self, cfgPath, buildPath): + tdLog.printNoPrefix("======== test case 11: Produce while one consume to subscribe one db, inclue 1 stb") + tdLog.info("step 1: create database, stb, ctb and insert data") + # create and start thread + parameterDict = {'cfg': '', \ + 'dbName': 'db11', \ + 'vgroups': 4, \ + 'stbName': 'stb', \ + 'ctbNum': 10, \ + 'rowsPerTbl': 10000, \ + 'batchNum': 100, \ + 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 + parameterDict['cfg'] = cfgPath + + self.initConsumerTable() + + tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) + + prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) + prepareEnvThread.start() + + tdLog.info("create topics from db") + topicName1 = 'topic_db1' + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + consumerId = 0 + expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + topicList = topicName1 + ifcheckdata = 0 + ifManualCommit = 1 + keyList = 'group.id:cgrp1,\ + enable.auto.commit:true,\ + auto.commit.interval.ms:1000,\ + auto.offset.reset:earliest' + self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) + + event.wait() + + tdLog.info("start consume processor") + pollDelay = 20 + showMsg = 1 + showRow = 1 + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + time.sleep(6) + tdLog.info("pkill consume processor") + if (platform.system().lower() == 'windows'): + os.system("TASKKILL /F /IM tmq_sim.exe") + else: + os.system('pkill tmq_sim') + expectRows = 0 + resultList = self.selectConsumeResult(expectRows) + + # wait for data ready + prepareEnvThread.join() + tdLog.info("insert process end, and start to check consume result") + + tdLog.info("again start consume processer") + self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) + + expectRows = 1 + resultList = self.selectConsumeResult(expectRows) + totalConsumeRows = 0 + for i in range(expectRows): + totalConsumeRows += resultList[i] + + if totalConsumeRows >= expectrowcnt or totalConsumeRows <= 0: + tdLog.info("act consume rows: %d, expect consume rows between %d and 0"%(totalConsumeRows, expectrowcnt)) + tdLog.exit("tmq consume rows error!") + + time.sleep(15) + tdSql.query("drop topic %s"%topicName1) + + tdLog.printNoPrefix("======== test case 11 end ...... 
") + + def run(self): + tdSql.prepare() + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + cfgPath = buildPath + "/../sim/psim/cfg" + tdLog.info("cfgPath: %s" % cfgPath) + + self.tmqCase10(cfgPath, buildPath) + self.tmqCase11(cfgPath, buildPath) + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index 7577ef340c..41004cc5a2 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -99,17 +99,25 @@ python3 ./test.py -f 2-query/function_stateduration.py python3 ./test.py -f 2-query/statecount.py python3 ./test.py -f 2-query/tail.py python3 ./test.py -f 2-query/ttl_comment.py +python3 ./test.py -f 2-query/distribute_agg_count.py +python3 ./test.py -f 2-query/distribute_agg_max.py +python3 ./test.py -f 2-query/distribute_agg_min.py +python3 ./test.py -f 2-query/distribute_agg_sum.py +python3 ./test.py -f 2-query/distribute_agg_spread.py +python3 ./test.py -f 2-query/distribute_agg_apercentile.py python3 ./test.py -f 6-cluster/5dnode1mnode.py python3 ./test.py -f 6-cluster/5dnode2mnode.py #python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py +#python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py # BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py python3 ./test.py -f 7-tmq/basic5.py python3 ./test.py -f 7-tmq/subscribeDb.py python3 ./test.py -f 7-tmq/subscribeDb0.py python3 ./test.py -f 7-tmq/subscribeDb1.py +python3 ./test.py -f 7-tmq/subscribeDb2.py +python3 ./test.py -f 7-tmq/subscribeDb3.py python3 ./test.py -f 7-tmq/subscribeStb.py python3 ./test.py -f 7-tmq/subscribeStb0.py python3 ./test.py -f 7-tmq/subscribeStb1.py diff --git a/tests/test/c/sdbDump.c b/tests/test/c/sdbDump.c index e5986cf4dd..612b870b7e 100644 --- a/tests/test/c/sdbDump.c +++ b/tests/test/c/sdbDump.c @@ -283,7 +283,8 @@ void dumpTrans(SSdb *pSdb, SJson *json) { tjsonAddIntegerToObject(item, "conflict", pObj->conflict); tjsonAddIntegerToObject(item, "exec", pObj->exec); tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime)); - tjsonAddStringToObject(item, "dbname", pObj->dbname); + tjsonAddStringToObject(item, "dbname1", pObj->dbname1); + tjsonAddStringToObject(item, "dbname2", pObj->dbname2); tjsonAddIntegerToObject(item, "commitLogNum", taosArrayGetSize(pObj->commitActions)); tjsonAddIntegerToObject(item, "redoActionNum", taosArrayGetSize(pObj->redoActions)); tjsonAddIntegerToObject(item, "undoActionNum", taosArrayGetSize(pObj->undoActions)); @@ -294,8 +295,9 @@ void dumpTrans(SSdb *pSdb, SJson *json) { void dumpHeader(SSdb *pSdb, SJson *json) { tjsonAddIntegerToObject(json, "sver", 1); - tjsonAddStringToObject(json, "curVer", i642str(pSdb->curVer)); - tjsonAddStringToObject(json, "curTerm", i642str(pSdb->curTerm)); + tjsonAddStringToObject(json, "applyIndex", i642str(pSdb->applyIndex)); + tjsonAddStringToObject(json, "applyTerm", i642str(pSdb->applyTerm)); + tjsonAddStringToObject(json, "applyConfig", i642str(pSdb->applyConfig)); SJson *maxIdsJson = tjsonCreateObject(); tjsonAddItemToObject(json, "maxIds", maxIdsJson); diff --git a/tests/test/c/tmqDemo.c b/tests/test/c/tmqDemo.c index 5a972e071a..02fd3c1396 100644 --- a/tests/test/c/tmqDemo.c +++ b/tests/test/c/tmqDemo.c @@ -353,8 +353,8 @@ 
tmq_list_t* build_topic_list() { void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) { static const int MIN_COMMIT_COUNT = 1000; - int msg_count = 0; - tmq_resp_err_t err; + int msg_count = 0; + int32_t err; if ((err = tmq_subscribe(tmq, topics))) { fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(err)); @@ -379,7 +379,7 @@ void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) { } void perf_loop(tmq_t* tmq, tmq_list_t* topics, int32_t totalMsgs, int64_t walLogSize) { - tmq_resp_err_t err; + int32_t err; if ((err = tmq_subscribe(tmq, topics))) { fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(err)); diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c index 8455bd9890..0f78a003d6 100644 --- a/tests/test/c/tmqSim.c +++ b/tests/test/c/tmqSim.c @@ -62,7 +62,7 @@ typedef struct { tmq_t* tmq; tmq_list_t* topicList; - + int32_t numOfVgroups; int32_t rowsOfPerVgroups[MAX_VGROUP_CNT][2]; // [i][0]: vgroup id, [i][1]: rows of consume int64_t ts; @@ -74,7 +74,7 @@ typedef struct { char cdbName[32]; char dbName[32]; int32_t showMsgFlag; - int32_t showRowFlag; + int32_t showRowFlag; int32_t saveRowFlag; int32_t consumeDelay; // unit s int32_t numOfThread; @@ -108,26 +108,20 @@ static void printHelp() { } char* getCurrentTimeString(char* timeString) { - time_t tTime = taosGetTimestampSec(); + time_t tTime = taosGetTimestampSec(); struct tm tm = *taosLocalTime(&tTime, NULL); - sprintf(timeString, "%d-%02d-%02d %02d:%02d:%02d", - tm.tm_year + 1900, - tm.tm_mon + 1, - tm.tm_mday, - tm.tm_hour, - tm.tm_min, - tm.tm_sec); + sprintf(timeString, "%d-%02d-%02d %02d:%02d:%02d", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, + tm.tm_min, tm.tm_sec); return timeString; } - void initLogFile() { char filename[256]; - char tmpString[128]; + char tmpString[128]; - sprintf(filename,"%s/../log/tmqlog_%s.txt", configDir, getCurrentTimeString(tmpString)); - //sprintf(filename, "%s/../log/tmqlog.txt", configDir); + sprintf(filename, "%s/../log/tmqlog_%s.txt", configDir, getCurrentTimeString(tmpString)); + // sprintf(filename, "%s/../log/tmqlog.txt", configDir); #ifdef WINDOWS for (int i = 2; i < sizeof(filename); i++) { if (filename[i] == ':') filename[i] = '-'; @@ -249,17 +243,18 @@ void addRowsToVgroupId(SThreadInfo* pInfo, int32_t vgroupId, int32_t rows) { for (i = 0; i < pInfo->numOfVgroups; i++) { if (vgroupId == pInfo->rowsOfPerVgroups[i][0]) { pInfo->rowsOfPerVgroups[i][1] += rows; - return; - } + return; + } } pInfo->rowsOfPerVgroups[pInfo->numOfVgroups][0] = vgroupId; pInfo->rowsOfPerVgroups[pInfo->numOfVgroups][1] += rows; pInfo->numOfVgroups++; - + taosFprintfFile(g_fp, "consume id %d, add one new vogroup id: %d\n", pInfo->consumerId, vgroupId); if (pInfo->numOfVgroups > MAX_VGROUP_CNT) { - taosFprintfFile(g_fp, "====consume id %d, vgroup num %d over than 32. new vgroupId: %d\n", pInfo->consumerId, pInfo->numOfVgroups, vgroupId); + taosFprintfFile(g_fp, "====consume id %d, vgroup num %d over than 32. 
new vgroupId: %d\n", pInfo->consumerId, + pInfo->numOfVgroups, vgroupId); taosCloseFile(&g_fp); exit(-1); } @@ -277,7 +272,8 @@ int32_t saveConsumeContentToTbl(SThreadInfo* pInfo, char* buf) { TAOS* pConn = taos_connect(NULL, "root", "taosdata", NULL, 0); assert(pConn != NULL); - sprintf(sqlStr, "insert into %s.content_%d values (%"PRId64", \'%s\')", g_stConfInfo.cdbName, pInfo->consumerId, pInfo->ts++, buf); + sprintf(sqlStr, "insert into %s.content_%d values (%" PRId64 ", \'%s\')", g_stConfInfo.cdbName, pInfo->consumerId, + pInfo->ts++, buf); TAOS_RES* pRes = taos_query(pConn, sqlStr); if (taos_errno(pRes) != 0) { pError("error in insert consume result, reason:%s\n", taos_errstr(pRes)); @@ -295,12 +291,13 @@ int32_t saveConsumeContentToTbl(SThreadInfo* pInfo, char* buf) { static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) { char buf[1024]; int32_t totalRows = 0; - + // printf("topic: %s\n", tmq_get_topic_name(msg)); int32_t vgroupId = tmq_get_vgroup_id(msg); - + taosFprintfFile(g_fp, "msg index:%" PRId64 ", consumerId: %d\n", msgIndex, pInfo->consumerId); - //taosFprintfFile(g_fp, "topic: %s, vgroupId: %d, tableName: %s\n", tmq_get_topic_name(msg), vgroupId, tmq_get_table_name(msg)); + // taosFprintfFile(g_fp, "topic: %s, vgroupId: %d, tableName: %s\n", tmq_get_topic_name(msg), vgroupId, + // tmq_get_table_name(msg)); taosFprintfFile(g_fp, "topic: %s, vgroupId: %d\n", tmq_get_topic_name(msg), vgroupId); while (1) { @@ -316,9 +313,9 @@ static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) const char* tbName = tmq_get_table_name(msg); if (0 != g_stConfInfo.showRowFlag) { - taosFprintfFile(g_fp, "tbname:%s, rows[%d]: %s\n", (tbName != NULL ? tbName:"null table"), totalRows, buf); - if (0 != g_stConfInfo.saveRowFlag) { - saveConsumeContentToTbl(pInfo, buf); + taosFprintfFile(g_fp, "tbname:%s, rows[%d]: %s\n", (tbName != NULL ? 
tbName : "null table"), totalRows, buf); + if (0 != g_stConfInfo.saveRowFlag) { + saveConsumeContentToTbl(pInfo, buf); } } @@ -326,7 +323,7 @@ static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) } addRowsToVgroupId(pInfo, vgroupId, totalRows); - + return totalRows; } @@ -342,8 +339,8 @@ int queryDB(TAOS* taos, char* command) { return 0; } -static void tmq_commit_cb_print(tmq_t* tmq, tmq_resp_err_t resp, tmq_topic_vgroup_list_t* offsets, void* param) { - pError("tmq_commit_cb_print() commit %d\n", resp); +static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) { + pError("tmq_commit_cb_print() commit %d\n", code); } void build_consumer(SThreadInfo* pInfo) { @@ -401,16 +398,11 @@ int32_t saveConsumeResult(SThreadInfo* pInfo) { int64_t now = taosGetTimestampMs(); // schema: ts timestamp, consumerid int, consummsgcnt bigint, checkresult int - sprintf(sqlStr, "insert into %s.consumeresult values (%"PRId64", %d, %" PRId64 ", %" PRId64 ", %d)", - g_stConfInfo.cdbName, - now, - pInfo->consumerId, - pInfo->consumeMsgCnt, - pInfo->consumeRowCnt, - pInfo->checkresult); + sprintf(sqlStr, "insert into %s.consumeresult values (%" PRId64 ", %d, %" PRId64 ", %" PRId64 ", %d)", + g_stConfInfo.cdbName, now, pInfo->consumerId, pInfo->consumeMsgCnt, pInfo->consumeRowCnt, pInfo->checkresult); char tmpString[128]; - taosFprintfFile(g_fp, "%s, consume id %d result: %s\n", getCurrentTimeString(tmpString), pInfo->consumerId ,sqlStr); + taosFprintfFile(g_fp, "%s, consume id %d result: %s\n", getCurrentTimeString(tmpString), pInfo->consumerId, sqlStr); TAOS_RES* pRes = taos_query(pConn, sqlStr); if (taos_errno(pRes) != 0) { @@ -421,7 +413,7 @@ int32_t saveConsumeResult(SThreadInfo* pInfo) { taos_free_result(pRes); - #if 0 +#if 0 // vgroups for (i = 0; i < pInfo->numOfVgroups; i++) { // schema: ts timestamp, consumerid int, consummsgcnt bigint, checkresult int @@ -445,19 +437,20 @@ int32_t saveConsumeResult(SThreadInfo* pInfo) { taos_free_result(pRes); } - #endif +#endif return 0; } void loop_consume(SThreadInfo* pInfo) { - tmq_resp_err_t err; + int32_t code; int64_t totalMsgs = 0; int64_t totalRows = 0; char tmpString[128]; - taosFprintfFile(g_fp, "%s consumer id %d start to loop pull msg\n", getCurrentTimeString(tmpString), pInfo->consumerId); + taosFprintfFile(g_fp, "%s consumer id %d start to loop pull msg\n", getCurrentTimeString(tmpString), + pInfo->consumerId); pInfo->ts = taosGetTimestampMs(); @@ -473,7 +466,7 @@ void loop_consume(SThreadInfo* pInfo) { totalMsgs++; if (totalRows >= pInfo->expectMsgCnt) { - char tmpString[128]; + char tmpString[128]; taosFprintfFile(g_fp, "%s over than expect rows, so break consume\n", getCurrentTimeString(tmpString)); break; } @@ -503,8 +496,8 @@ void* consumeThreadFunc(void* param) { return NULL; } - tmq_resp_err_t err = tmq_subscribe(pInfo->tmq, pInfo->topicList); - if (err) { + int32_t err = tmq_subscribe(pInfo->tmq, pInfo->topicList); + if (err != 0) { pError("tmq_subscribe() fail, reason: %s\n", tmq_err2str(err)); exit(-1); } @@ -519,17 +512,19 @@ void* consumeThreadFunc(void* param) { pPrint("tmq_commit() manual commit when consume end.\n"); /*tmq_commit(pInfo->tmq, NULL, 0);*/ tmq_commit_sync(pInfo->tmq, NULL); + taosFprintfFile(g_fp, "tmq_commit() manual commit over.\n"); + pPrint("tmq_commit() manual commit over.\n"); } err = tmq_unsubscribe(pInfo->tmq); - if (err) { + if (err != 0) { pError("tmq_unsubscribe() fail, reason: %s\n", tmq_err2str(err)); /*pInfo->consumeMsgCnt = -1;*/ /*return NULL;*/ } err = 
tmq_consumer_close(pInfo->tmq); - if (err) { + if (err != 0) { pError("tmq_consumer_close() fail, reason: %s\n", tmq_err2str(err)); /*exit(-1);*/ } diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index dbc3c2c460..f97a13d2c5 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -458,11 +458,17 @@ bool simExecuteSystemContentCmd(SScript *script, char *option) { char buf[4096] = {0}; char buf1[4096 + 512] = {0}; char filename[400] = {0}; - sprintf(filename, "%s/%s.tmp", simScriptDir, script->fileName); + sprintf(filename, "%s" TD_DIRSEP "%s.tmp", simScriptDir, script->fileName); +#ifdef WINDOWS + sprintf(buf, "cd %s && ", simScriptDir); + simVisuallizeOption(script, option, buf + strlen(buf)); + sprintf(buf1, "%s > %s 2>nul", buf, filename); +#else sprintf(buf, "cd %s; ", simScriptDir); simVisuallizeOption(script, option, buf + strlen(buf)); sprintf(buf1, "%s > %s 2>/dev/null", buf, filename); +#endif sprintf(script->system_exit_code, "%d", system(buf1)); simStoreSystemContentResult(script, filename); diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c index 638c4a1ccb..5b6dda4dae 100644 --- a/tests/tsim/src/simParse.c +++ b/tests/tsim/src/simParse.c @@ -206,7 +206,7 @@ SScript *simParseScript(char *fileName) { for (int32_t i = 0; i < cmdlen; ++i) { if (buffer[i] == '\r' || buffer[i] == '\n') { - buffer[i] = ' '; + buffer[i] = '\0'; } } diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 8ed0e9ddcf..1f29237d38 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -855,8 +855,7 @@ void shellGetGrantInfo() { if (code == TSDB_CODE_OPS_NOT_SUPPORT) { fprintf(stdout, "Server is Community Edition, %s\n\n", sinfo); } else { - fprintf(stderr, "Failed to check Server Edition, Reason:0x%04x:%s\n\n", taos_errno(shell.conn), - taos_errstr(shell.conn)); + fprintf(stderr, "Failed to check Server Edition, Reason:0x%04x:%s\n\n", code, taos_errstr(tres)); } return; } diff --git a/tools/taosadapter b/tools/taosadapter new file mode 160000 index 0000000000..9ce3f5c98e --- /dev/null +++ b/tools/taosadapter @@ -0,0 +1 @@ +Subproject commit 9ce3f5c98ef95d9c7c596c4ed7302b0ed69a92b2